def run(test, params, env):
    """
    Test command: secret-dumpxml <secret>

    Output attributes of a secret as an XML dump to stdout.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    status_error = ("yes" == params.get("status_error", "no"))
    secret_ref = params.get("secret_ref")

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = process.getstatusoutput(cmd)
        if status:
            raise error.TestNAError("Failed to generate valid uuid")

    # Get a full path for a tmpfile; the file itself need not exist
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml = """
<secret ephemeral='no' private='yes'>
  <uuid>%s</uuid>
  <usage type='volume'>
    <volume>%s</volume>
  </usage>
</secret>
""" % (uuid, volume_path)

    # Write secret xml into a tmpfile
    tmp_file = tempfile.NamedTemporaryFile(prefix="secret_xml_",
                                           dir=tmp_dir)
    xmlfile = tmp_file.name
    tmp_file.close()

    with open(xmlfile, 'w') as fd:
        fd.write(secret_xml)

    try:
        virsh.secret_define(xmlfile, debug=True)

        cmd_result = virsh.secret_dumpxml(uuid, debug=True)
        output = cmd_result.stdout.strip()
        if not status_error and cmd_result.exit_status:
            raise error.TestFail("Dumping the xml of secret object failed")

        match_string = "<uuid>%s</uuid>" % uuid
        if not re.search(match_string, output):
            raise error.TestFail("The secret xml is not valid")
    finally:
        # Cleanup
        virsh.secret_undefine(uuid, debug=True)

        if os.path.exists(xmlfile):
            os.remove(xmlfile)
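
# For reference, the virsh-level flow this test drives (a sketch; the XML
# path and uuid below are placeholders, not fixed values):
#
#   virsh secret-define /tmp/.../secret_xml_XXXX   # define from the XML file
#   virsh secret-dumpxml <uuid>                    # prints the <secret> XML
#   virsh secret-undefine <uuid>                   # cleanup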
def cleanup(params):
    """
    Cleanup secret and volume
    :params: the parameter dictionary
    """
    uuid = params.get("secret_uuid")
    usage_volume = params.get("secret_usage_volume")

    os.unlink(usage_volume)

    if uuid:
        result = virsh.secret_undefine(uuid)
        status = result.exit_status
        if status:
            raise error.TestFail(result.stderr)
Example 3
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi network disk XML
    3. Attach disk with the XML file and check the disk inside the VM
    4. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Detect whether we are running on a PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix the "no more PCI slots" issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to satisfy repeated disk attach requests
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
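            # A worked example (hypothetical password): with chap_passwd set
            # to "redhat", secret_string becomes "cmVkaGF0", since libvirt
            # expects the secret value base64-encoded.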
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None
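            # A hedged sketch of the vol-list output being parsed above:
            #   Name                 Path
            #   -----------------------------------------------------
            #   unit:0:0:1           /dev/disk/by-path/ip-...-lun-1
            # findall() yields [("Name", "Path"), ("unit:0:0:1",
            # "/dev/disk/...")], so vol_list[1] is the (name, path)
            # tuple of the first real volume.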

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # iscsi-direct pools don't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
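        # For the "network" disk case, create_disk_xml() is expected to
        # produce XML roughly like the sketch below (host/IQN values are
        # whatever the test set up, not fixed):
        #
        # <disk type='network' device='disk'>
        #   <source protocol='iscsi' name='iqn.../1'>
        #     <host name='127.0.0.1' port='3260'/>
        #   </source>
        #   <target dev='vdb' bus='virtio'/>
        # </disk>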
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait until the domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create.
            # virsh snapshot-revert is not supported on combined internal and
            # external snapshots; see
            # https://bugzilla.redhat.com/show_bug.cgi?id=1733173 for details.
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout.strip():
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Shut down the VM before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Example 4
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()
Example 5
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
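    # Hypothetical usage: create_pool("enc_pool", "dir", "/var/tmp/enc_pool")
    # defines, builds and starts a directory-backed pool at that target path.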

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: Encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        :return: True if create successfully.
        """
        # Clean up dirty volumes if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: Volume path.
        :return: Secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list = virsh.secret_list().stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the UUID is the first column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded",
                                            "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        # Clean up dirty secrets in the test environment if there are any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {
            "name": volume_name,
            "capacity": int(volume_cap),
            "allocation": int(volume_alloc),
            "format": volume_target_format,
            "path": volume_target_path,
            "label": volume_target_label,
            "capacity_unit": volume_cap_unit
        }
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be
        # created first.
        if not virt_disk_qcow2_format:
            # Create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update(
                {"secret": {
                    "type": secret_type,
                    "uuid": sec_encryption_uuid
                }})
        try:
            # If Libvirt version is lower than 2.5.0
            # Creating luks encryption volume is not supported,so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume." "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {
                dev_attrs: volume_target_path
            }})
        disk_xml.driver = {
            "name": "qemu",
            "type": volume_target_format,
            "cache": "none"
        }
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {
                "encryption": v_xml.encryption.format,
                "secret": {
                    "type": v_xml.encryption.secret["type"],
                    "uuid": v_xml.encryption.secret["uuid"]
                }
            }
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                    **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                    **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that a LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=True.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but did not. "
                              "Error: %s" % str(e))
                else:
                    logging.debug("VM failed to start as expected. "
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
Example 7
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support", "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write data to file
            write_file(file_path)

            # Set length so that offset + length can be calculated in the
            # get_pre_post_digest() and digest() calls below
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre-region and post-region digests for the given
                offset and length.
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)
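            # Region layout on the volume for an upload with offset/length:
            #   [0, offset)               -> pre region, must stay unchanged
            #   [offset, offset + length) -> overwritten by vol-upload
            #   [offset + length, EOF)    -> post region, must stay unchanged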

            # Get pre and post digests before the operation for comparison
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post regions before and after the upload
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s" %
                          (operation, result.stderr))
            # Compare the change part on volume and file
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed", operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)

            # Create one disk xml and attach it to VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file',
                                          'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()

            # Write 100M data into disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Download volume to local with sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)

            # Check download image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
               download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))

            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Do volume upload, uploading the sparse file downloaded last time.
            result = virsh.vol_upload(upload_sparse_file, download_file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
               upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
def run(test, params, env):
    """
    Test command: virsh secret-define <file>
                  secret-undefine <secret>
    The testcase is to define or modify a secret
    from an XML file, then undefine it
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    secret_ref = params.get("secret_ref")
    ephemeral = params.get("ephemeral_value", "no")
    private = params.get("private_value", "no")
    modify_volume = ("yes" == params.get("secret_modify_volume", "no"))
    remove_uuid = ("yes" == params.get("secret_remove_uuid", "no"))

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = process.getstatusoutput(cmd)
        if status:
            raise error.TestNAError("Failed to generate valid uuid")

    elif secret_ref == "secret_invalid_uuid":
        uuid = params.get(secret_ref)

    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    get_value_acl = "yes" == params.get("get_value_acl", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    get_value_error = "yes" == params.get("get_value_error", "no")

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    # Get a full path for a tmpfile; the file itself need not exist
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml_obj = SecretXML(ephemeral, private)
    secret_xml_obj.uuid = uuid
    secret_xml_obj.volume = volume_path
    secret_xml_obj.usage = "volume"

    secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml")

    # Run the test
    try:
        if define_acl:
            utils.run("chmod 666 %s" % secret_xml_obj.xml)
            cmd_result = virsh.secret_define(secret_xml_obj.xml, **acl_dargs)
        else:
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)
        libvirt.check_exit_status(cmd_result, define_error)
        if cmd_result.exit_status:
            return

        # Check ephemeral attribute
        exist = os.path.exists(secret_obj_xmlfile)
        if (ephemeral == "yes" and exist) or \
           (ephemeral == "no" and not exist):
            raise error.TestFail("The ephemeral attribute worked not expected")

        # Check private attribute
        virsh.secret_set_value(uuid, SECRET_BASE64, debug=True)
        if get_value_acl:
            cmd_result = virsh.secret_get_value(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_get_value(uuid, debug=True)
        libvirt.check_exit_status(cmd_result, get_value_error)
        status = cmd_result.exit_status
        err_msg = "The private attribute worked not expected"
        if private == "yes" and not status:
            raise error.TestFail(err_msg)
        if private == "no" and status:
            if not get_value_error:
                raise error.TestFail(err_msg)

        if modify_volume:
            volume_path = os.path.join(tmp_dir, "secret_volume_modify")
            secret_xml_obj.volume = volume_path
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)
            if cmd_result.exit_status == 0:
                raise error.TestFail("Expect fail on redefine after modify "
                                     "volume, but success indeed")
        if remove_uuid:
            secret_xml_obj2 = SecretXML(ephemeral, private)
            secret_xml_obj2.volume = volume_path
            secret_xml_obj2.usage = "volume"
            cmd_result = virsh.secret_define(secret_xml_obj2.xml, debug=True)
            if cmd_result.exit_status == 0:
                raise error.TestFail("Expect fail on redefine after remove "
                                     "uuid, but success indeed")

        if undefine_acl:
            cmd_result = virsh.secret_undefine(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_undefine(uuid, debug=True)
        libvirt.check_exit_status(cmd_result, undefine_error)
    finally:
        # cleanup
        virsh.secret_undefine(uuid, ignore_status=True)
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        if os.path.exists(secret_obj_xmlfile):
            os.unlink(secret_obj_xmlfile)
Example 9
def run(test, params, env):
    """
    Test command: secret-dumpxml <secret>

    Output attributes of a secret as an XML dump to stdout.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    status_error = ("yes" == params.get("status_error", "no"))
    secret_ref = params.get("secret_ref")

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = process.getstatusoutput(cmd)
        if status:
            raise error.TestNAError("Failed to generate valid uuid")

    # Get a full path for the tmpfile; the file need not exist yet
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml = """
<secret ephemeral='no' private='yes'>
  <uuid>%s</uuid>
  <usage type='volume'>
    <volume>%s</volume>
  </usage>
</secret>
""" % (uuid, volume_path)

    # Write secret xml into a tmpfile
    tmp_file = tempfile.NamedTemporaryFile(prefix="secret_xml_",
                                           dir=tmp_dir)
    xmlfile = tmp_file.name
    tmp_file.close()

    with open(xmlfile, 'w') as fd:
        fd.write(secret_xml)

    try:
        virsh.secret_define(xmlfile, debug=True)

        cmd_result = virsh.secret_dumpxml(uuid, **virsh_dargs)
        output = cmd_result.stdout.strip()
        if not status_error and cmd_result.exit_status:
            raise error.TestFail("Dumping the xml of secret object failed")

        match_string = "<uuid>%s</uuid>" % uuid
        if not re.search(match_string, output):
            raise error.TestFail("The secret xml is not valid")
    finally:
        # Cleanup
        virsh.secret_undefine(uuid, debug=True)

        if os.path.exists(xmlfile):
            os.remove(xmlfile)
def run(test, params, env):
    """
    Test command: virsh secret-define <file>
                  secret-undefine <secret>
    The test case defines or modifies a secret
    from an XML file, then undefines it
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    secret_ref = params.get("secret_ref")
    ephemeral = params.get("ephemeral_value", "no")
    private = params.get("private_value", "no")
    modify_volume = ("yes" == params.get("secret_modify_volume", "no"))
    remove_uuid = ("yes" == params.get("secret_remove_uuid", "no"))

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = process.getstatusoutput(cmd)
        if status:
            test.cancel("Failed to generate valid uuid")

    elif secret_ref == "secret_invalid_uuid":
        uuid = params.get(secret_ref)

    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    get_value_acl = "yes" == params.get("get_value_acl", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    get_value_error = "yes" == params.get("get_value_error", "no")
    define_readonly = "yes" == params.get("secret_define_readonly", "no")
    undefine_readonly = "yes" == params.get("secret_undefine_readonly", "no")
    expect_msg = params.get("secret_err_msg", "")

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    # Get a full path for the tmpfile; the file need not exist yet
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml_obj = SecretXML(ephemeral, private)
    secret_xml_obj.uuid = uuid
    secret_xml_obj.volume = volume_path
    secret_xml_obj.usage = "volume"

    secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml")

    # Run the test
    try:
        if define_acl:
            process.run("chmod 666 %s" % secret_xml_obj.xml, shell=True)
            cmd_result = virsh.secret_define(secret_xml_obj.xml, **acl_dargs)
        else:
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True,
                                             readonly=define_readonly)
        libvirt.check_exit_status(cmd_result, define_error)
        if cmd_result.exit_status:
            if define_readonly:
                if not re.search(expect_msg, cmd_result.stderr.strip()):
                    test.fail("Failed to get expected error message: %s"
                              % expect_msg)
                else:
                    logging.info("Got expected error message: %s", expect_msg)
            return

        # Check ephemeral attribute
        exist = os.path.exists(secret_obj_xmlfile)
        if (ephemeral == "yes" and exist) or \
           (ephemeral == "no" and not exist):
            test.fail("The ephemeral attribute worked not expected")

        # Check private attribute
        virsh.secret_set_value(uuid, SECRET_BASE64, debug=True)
        if get_value_acl:
            cmd_result = virsh.secret_get_value(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_get_value(uuid, debug=True)
        libvirt.check_exit_status(cmd_result, get_value_error)
        status = cmd_result.exit_status
        err_msg = "The private attribute worked not expected"
        if private == "yes" and not status:
            test.fail(err_msg)
        if private == "no" and status:
            if not get_value_error:
                test.fail(err_msg)

        if modify_volume:
            volume_path = os.path.join(tmp_dir, "secret_volume_modify")
            secret_xml_obj.volume = volume_path
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)
            if cmd_result.exit_status == 0:
                test.fail("Expect fail on redefine after modify "
                          "volume, but success indeed")
        if remove_uuid:
            secret_xml_obj2 = SecretXML(ephemeral, private)
            secret_xml_obj2.volume = volume_path
            secret_xml_obj2.usage = "volume"
            cmd_result = virsh.secret_define(secret_xml_obj2.xml, debug=True)
            if cmd_result.exit_status == 0:
                test.fail("Expect fail on redefine after remove "
                          "uuid, but success indeed")

        if undefine_acl:
            cmd_result = virsh.secret_undefine(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_undefine(uuid, debug=True, readonly=undefine_readonly)
            libvirt.check_exit_status(cmd_result, undefine_error)
            if undefine_readonly:
                if not re.search(expect_msg, cmd_result.stderr.strip()):
                    test.fail("Failed to get expected error message: %s"
                              % expect_msg)
                else:
                    logging.info("Got expected error message: %s", expect_msg)
    finally:
        # cleanup
        virsh.secret_undefine(uuid, ignore_status=True)
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        if os.path.exists(secret_obj_xmlfile):
            os.unlink(secret_obj_xmlfile)
Example no. 11
    cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)

    cmd_result = virsh.secret_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        uuid = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0]
    except IndexError:
        raise error.TestError("Fail to get secret uuid")

    if uuid:
        try:
            virsh.secret_dumpxml(uuid, to_file=file, **virsh_dargs)
        except error.CmdError as e:
            raise error.TestError(str(e))

    virsh.secret_undefine(uuid, ignore_status=True)
    if os.path.exists(volume_path):
        os.unlink(volume_path)
    if os.path.exists(secret_obj_xmlfile):
        os.unlink(secret_obj_xmlfile)


def interface_validate(file=None, **virsh_dargs):
    """
    Test for schema interface
    """
    cmd_result = virsh.iface_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        iface_name = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0]
    except IndexError:
Example no. 12
def run(test, params, env):
    """
    Test rbd disk device.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare disk image.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create a volume in the pool.

        :param vol_params: volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = '\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.

        :param vm_obj: VM guest object.
        :param target: disk target dev in VM.
        :param old_parts: partitions in the VM before the disk was attached.
        :param read_only: whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Parse entries only if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create /etc/ceph/ceph.conf file to suppress spurious warning messages.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of unsupported error messages so such tests can be skipped
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                # re.findall()[0] raises IndexError when nothing matches,
                # so check the match list before indexing.
                secret_match = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                          ret.stdout.strip())
                if not secret_match:
                    test.error("Failed to get secret uuid")
                secret_uuid = secret_match[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # Pass a different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plug, start VM first, and then wait the OS boot.
        # Otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for a volume disk
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait for vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
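        # test.cancel() raises, so the for/else below only falls through to
        # test.fail() when none of the unsupported messages matched.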
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start."
                      "Error: %s" % str(details))
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
Example no. 13
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add volume shrink test after libvirt upstream supports it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    if not libvirt_version.version_compare(1, 0, 0):
        if "--allocate" in resize_option:
            test.cancel("'--allocate' flag is not supported in"
                        " current libvirt version.")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # Raise an error if a pool with the given name already exists
        if libv_pool.pool_exists(pool_name):
            test.error("Pool '%s' already exists" % pool_name)
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name, pool_type, pool_target,
                              emulated_image, image_size=emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity, '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]

        # Create a volume
        if b_luks_encrypt:
            luks_sec_uuid = create_luks_secret(os.path.join(pool_target, vol_name),
                                               test)
            secret_uuids.append(luks_sec_uuid)
            set_secret_value(encryption_password, luks_sec_uuid)
            create_luks_vol(vol_name, luks_sec_uuid, params)
        else:
            libv_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format,
                             capacity=vol_capacity, allocation=None,
                             pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name, test)

        # The volume size may not be exactly what we expect after resize:
        # 1) vol_new_capacity = 1b with the --delta option: the volume size
        #    will not change;
        # 2) vol_new_capacity = 1KB with the --delta option: the volume size
        #    will increase by 1024, not 1000.
        # So volume size checking can be disabled after resize.
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path, test,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name, vol_new_capacity, pool_name,
                                  resize_option, uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                test.fail(result.stdout.strip() + result.stderr.strip())
            else:
                if check_vol_info(libv_vol, vol_name, test, expect_info):
                    logging.debug("Volume %s resize check pass.", vol_name)
                else:
                    test.fail("Volume %s resize check fail." %
                              vol_name)
        elif result.exit_status == 0:
            test.fail("Expect resize fail but run successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
Example no. 14
def run(test, params, env):
    """
    Test command: virsh secret-list

    Returns a list of secrets
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    status_error = ("yes" == params.get("status_error", "no"))
    secret_list_option = params.get("secret_list_option", "")

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    uuid_list = []
    for i in ['yes', 'no']:
        for j in ['yes', 'no']:
            # Generate valid uuid
            cmd = "uuidgen"
            status, uuid = commands.getstatusoutput(cmd)
            if status:
                raise error.TestNAError("Failed to generate valid uuid")
            uuid_list.append(uuid)

            # Get a full path for the tmpfile; the file need not exist yet
            tmp_dir = data_dir.get_tmp_dir()
            volume_path = os.path.join(tmp_dir, "secret_volume_%s_%s" % (i, j))

            secret_xml_obj = SecretXML(ephemeral=i, private=j)
            secret_xml_obj.uuid = uuid
            secret_xml_obj.volume = volume_path
            secret_xml_obj.usage = "volume"
            secret_xml_obj.description = "test"

            virsh.secret_define(secret_xml_obj.xml, debug=True)

    try:
        cmd_result = virsh.secret_list(secret_list_option, **virsh_dargs)
        output = cmd_result.stdout.strip()
        exit_status = cmd_result.exit_status
        if not status_error and exit_status != 0:
            raise error.TestFail("Run failed with the right command")
        if status_error and exit_status == 0:
            raise error.TestFail("Ran successfully with the wrong command!")

        # Return if secret-list failed
        if exit_status != 0:
            return

        # Check the result
        m1 = re.search(uuid_list[0], output)
        m2 = re.search(uuid_list[1], output)
        m3 = re.search(uuid_list[2], output)
        m4 = re.search(uuid_list[3], output)

        if secret_list_option.find("--no-ephemeral") >= 0:
            if m1 or m2:
                raise error.TestFail("Secret object %s, %s shouldn't be listed"
                                     " out" % (uuid_list[0], uuid_list[1]))
            if secret_list_option.find("--private") >= 0:
                if not m3:
                    raise error.TestFail("Failed list secret object %s" %
                                         uuid_list[2])
                if m4:
                    raise error.TestFail("Secret object %s shouldn't be listed"
                                         " out" % uuid_list[3])
            elif secret_list_option.find("--no-private") >= 0:
                if not m4:
                    raise error.TestFail("Failed list secret object %s" %
                                         uuid_list[3])
                if m3:
                    raise error.TestFail("Secret object %s shouldn't be listed"
                                         " out" % uuid_list[2])
            else:
                if not m3 or not m4:
                    raise error.TestFail("Failed list secret object %s, %s" %
                                         (uuid_list[2], uuid_list[3]))
        elif secret_list_option.find("--ephemeral") >= 0:
            if m3 or m4:
                raise error.TestFail("Secret object %s, %s shouldn't be listed"
                                     " out" % (uuid_list[2], uuid_list[3]))
            if secret_list_option.find("--private") >= 0:
                if not m1:
                    raise error.TestFail("Failed list secret object %s" %
                                         uuid_list[0])
                if m2:
                    raise error.TestFail("Secret object %s shouldn't be listed"
                                         " out" % uuid_list[1])
            elif secret_list_option.find("--no-private") >= 0:
                if not m2:
                    raise error.TestFail("Failed list secret object %s" %
                                         uuid_list[1])
                if m1:
                    raise error.TestFail("Secret object %s shouldn't be listed"
                                         " out" % uuid_list[0])
            else:
                if not m1 or not m2:
                    raise error.TestFail("Failed list secret object %s, %s" %
                                         (uuid_list[0], uuid_list[1]))
        elif secret_list_option.find("--private") >= 0:
            if not m1 or not m3:
                raise error.TestFail("Failed list secret object %s, %s" %
                                     (uuid_list[0], uuid_list[2]))
            if m2 or m4:
                raise error.TestFail("Secret object %s and %s should't be "
                                     "listed out"
                                     % (uuid_list[1], uuid_list[3]))
        elif secret_list_option.find("--no-private") >= 0:
            if not m2 or not m4:
                raise error.TestFail("Failed list secret object %s, %s" %
                                     (uuid_list[1], uuid_list[3]))
            if m1 or m3:
                raise error.TestFail("Secret object %s and %s shouldn't be "
                                     "listed out" %
                                     (uuid_list[0], uuid_list[2]))
        elif not secret_list_option:
            if not m1 or not m2 or not m3 or not m4:
                raise error.TestFail("Failed to list all secret objects: %s" %
                                     uuid_list)

    finally:
        # Cleanup
        for i in range(0, 4):
            virsh.secret_undefine(uuid_list[i], debug=True)
Example #15
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

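        # The two snapshot calls below are expected to fail in this disk
        # configuration; check_exit_status is called with its second
        # argument (expect_error) set to True.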
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only --diskspec vda,"
                                       "file=/tmp/testvm-snap1"
                                       % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       " --diskspec vda,file=/tmp/testvm-snap2"
                                       % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Partitions present in the VM before the disk was added.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Expected disk option not found in qemu command "
                      "line (checked with: %s)" % cmd)

    def check_auth_plaintext(vm_name, password):
        """
        Check if libvirt passed the plaintext of the chap authentication
        password to qemu.
        :param vm_name: The name of vm to be checked.
        :param password: The plaintext of password used for chap authentication.
        :return: True if using plaintext, False if not.
        """
        cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s"
               % (vm_name, password))
        return process.system(cmd, ignore_status=True, shell=True) == 0

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("Placing auth in source is not supported by this libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

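            # On success virsh prints a line like "Secret <uuid> created";
            # the regex below pulls the UUID field out of that message.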
            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=iscsi_host)

        # If we use qcow2 disk format, should format iscsi disk first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For a lun type device, the iothread attribute needs to be set on the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, a lun type device needs to use
        # virtio-scsi instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, need add iothread attribute in controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if not re.search(uuid, str(e)):
                    test.fail("VM failed to start for an unexpected "
                              "reason: %s" % str(e))
            else:
                test.fail("VM failed to start. "
                          "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)
            # Test libvirt doesn't pass the plaintext of chap password to qemu,
            # this function is implemented in libvirt 4.3.0-1.
            if (libvirt_version.version_compare(4, 3, 0) and
                    (auth_uuid or auth_usage) and
                    chap_passwd):
                if check_auth_plaintext(vm_name, chap_passwd):
                    test.fail("Libvirt should not pass plaintext of chap "
                              "password to qemu-kvm.")

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
Example #16
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target, old_parts):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                logging.debug("VM failed to start as expected. "
                              "Error: %s", str(e))
            else:
                raise error.TestFail("VM failed to start. "
                                     "Error: %s" % str(e))

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for i in sec_uuid:
            virsh.secret_undefine(i, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
Example #17
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2.Start VM
    3.Set domblkthreshold on target device in VM
    4.Trigger one threshold event
    5.Check threshold event is received as expected
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM name
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            time.sleep(10)
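            # Writing 101M of data (bs=1M, count=101) deliberately crosses
            # the default 100M block_threshold_value set on the device.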
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        result = virsh.blockcommit(vm_name, target,
                                   blockcommit_options, ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM name
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds so the event-waiting thread in the main test
            # can start first
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return: mirror source index as an integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up leftover secrets in the test environment, if any.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

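            # qemu-img takes the luks passphrase via a secret object: the
            # passphrase is base64-encoded inline and referenced with
            # -o key-secret=sec0.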
            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank ceph_cfg so cleanup can tell whether a config file was created
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
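            # The `rbd info || qemu-img convert` pattern below uploads the
            # local image only when the rbd image does not already exist.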
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create dir based pool,and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # If the libvirt version is lower than 2.5.0, creating a luks
                # encryption volume is not supported, so skip it.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            # disk_encryption_dict was already built in the luks setup above
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml except mirror_mode_blockcommit or mirror_mode_blockcopy
        if (not mirror_mode_blockcommit and not mirror_mode_blockcopy):
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Setting a threshold for the disk mirroring "
                                "feature is not supported on this libvirt version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
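                # While the active blockcommit runs, the disk gains a
                # <mirror> element; "vda[1]" addresses that mirror target by
                # source index. Index 1 is assumed here; the blockcopy case
                # below reads the index from the live XML instead.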
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Setting a threshold for the disk mirroring "
                                "feature is not supported on this libvirt version")
                # Do transient blockcopy in the background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True})
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True})
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.info("Clean up nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Example #18
def run(test, params, env):
    '''
    Test the command virsh pool-create-as

    (1) Prepare backend storage device
    (2) Define secret xml and set secret value
    (3) Test pool-create-as or virsh pool-define with authentication
    '''

    pool_options = params.get('pool_options', '')
    pool_name = params.get('pool_name')
    pool_type = params.get('pool_type')
    pool_target = params.get('pool_target', '')
    status_error = params.get('status_error') == "yes"

    # iscsi options
    emulated_size = params.get("iscsi_image_size", "1")
    iscsi_host = params.get("iscsi_host", "127.0.0.1")
    chap_user = params.get("iscsi_user")
    chap_passwd = params.get("iscsi_password")

    # ceph options
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = "--keyring %s" % key_file

    # auth options
    auth_usage = (params.get('auth_usage') == 'yes')
    auth_uuid = (params.get('auth_uuid') == 'yes')
    sec_ephemeral = params.get("secret_ephemeral", "no")
    sec_private = params.get("secret_private", "yes")
    sec_desc = params.get("secret_description")
    auth_type = params.get("auth_type")
    sec_usage = params.get("secret_usage_type")
    sec_target = params.get("secret_usage_target")
    sec_name = params.get("secret_name")
    auth_sec_dict = {"sec_ephemeral": sec_ephemeral,
                     "sec_private": sec_private,
                     "sec_desc": sec_desc,
                     "sec_usage": sec_usage,
                     "sec_target": sec_target,
                     "sec_name": sec_name}

    if sec_usage == "iscsi":
        auth_username = chap_user
        sec_password = chap_passwd
        secret_usage = sec_target

    if sec_usage == "ceph":
        auth_username = ceph_auth_user
        sec_password = ceph_auth_key
        secret_usage = sec_name

    if pool_target and not os.path.isdir(pool_target):
        if os.path.isfile(pool_target):
            logging.error('<target> must be a directory')
        else:
            os.makedirs(pool_target)

    def setup_ceph_auth():
        disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
        disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))

        if not utils_package.package_install(["ceph-common"]):
            test.error("Failed to install ceph-common")

        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" %
                    (ceph_client_name, ceph_client_key))

        # Delete the disk if it exists
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Create a local image and make a filesystem on it.
        img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
        disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}"
                    .format(img_file))
        process.run(disk_cmd, ignore_status=False, shell=True)

        # Convert the image to remote storage
        # Ceph can only support raw format
        disk_cmd = ("qemu-img convert -O %s %s %s"
                    % ("raw", img_file, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)

    def setup_iscsi_auth():
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd)
        return iscsi_target

    def check_auth_in_xml(dparams):
        sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source()
        with open(sourcexml.xml) as xml_f:
            logging.debug("Source XML is: \n%s", xml_f.read())

        # Check result
        try:
            for name, v_expect in dparams.items():
                if v_expect != sourcexml[name]:
                    test.fail("Expect to find %s=%s, but got %s=%s"
                              % (name, v_expect, name, sourcexml[name]))
        except xcepts.LibvirtXMLNotFoundError as details:
            if "usage not found" in str(details) and auth_uuid:
                pass  # Not a auth_usage test
            elif "uuid not found" in str(details) and auth_usage:
                pass  # Not a auth_uuid test
            else:
                test.fail(details)

    def check_result(result, expect_error=False):
        # pool-define-as returns a CmdResult
        if isinstance(result, process.CmdResult):
            result = (result.exit_status == 0)  # True means run success

        if expect_error:
            if result:
                test.fail("Expect to fail but run success")
            logging.info("It's an expected error")
        elif not result:
            test.fail("Expect to succeed but run failure")

    if not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Pool create/define with authentication"
                    " not support in this libvirt version")

    sec_uuid = ""
    img_file = ""
    # Prepare a blank ceph_cfg so cleanup can tell whether a config file was created
    ceph_cfg = ""
    libvirt_pool = libvirt_storage.StoragePool()
    try:
        # Create secret xml and set value
        encode = True
        if sec_usage == "ceph":
            encode = False  # Ceph key already encoded
        sec_uuid = libvirt.create_secret(auth_sec_dict)
        virsh.secret_set_value(sec_uuid, sec_password, encode=encode, debug=True)

        if sec_usage == "iscsi":
            iscsi_dev = setup_iscsi_auth()
            pool_options += (" --source-host %s --source-dev %s"
                             " --auth-type %s --auth-username %s"
                             % (iscsi_host, iscsi_dev, auth_type, auth_username))

        if sec_usage == "ceph":
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            setup_ceph_auth()
            rbd_pool = ceph_disk_name.split('/')[0]
            pool_options += (" --source-host %s --source-name %s"
                             " --auth-type %s --auth-username %s"
                             % (ceph_host_ip, rbd_pool, auth_type, auth_username))

        if auth_usage:
            pool_options += " --secret-usage %s" % secret_usage

        if auth_uuid:
            pool_options += " --secret-uuid %s" % sec_uuid

        # Run test cases
        func_name = params.get("test_func", "pool_create_as")
        logging.info('Perform test runner: %s', func_name)
        if func_name == "pool_create_as":
            func = virsh.pool_create_as
        if func_name == "pool_define_as":
            func = virsh.pool_define_as
        result = func(pool_name, pool_type, pool_target,
                      extra=pool_options, debug=True)

        # Check status_error
        check_result(result, expect_error=status_error)
        if not status_error:
            # Check pool status
            pool_status = libvirt_pool.pool_state(pool_name)
            if ((pool_status == 'inactive' and func_name == "pool_define_as") or
                    (pool_status == "active" and func_name == "pool_create_as")):
                logging.info("Expected pool status:%s" % pool_status)
            else:
                test.fail("Not an expected pool status: %s" % pool_status)
            # Check pool dumpxml
            dict_expect = {"auth_type": auth_type, "auth_username": auth_username,
                           "secret_usage": secret_usage, "secret_uuid": sec_uuid}
            check_auth_in_xml(dict_expect)
    finally:
        # Clean up
        logging.info("Start to cleanup")
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        if os.path.exists(img_file):
            os.remove(img_file)
        virsh.secret_undefine(sec_uuid, ignore_status=True)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if libvirt_pool.pool_exists(pool_name):
            libvirt_pool.delete_pool(pool_name)
Example #19
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_data_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "ceph":
            ceph_mon_host = params.get("ceph_mon_host",
                                       "EXAMPLE_MON_HOST_AUTHX")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT")
            ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL")
            ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE")
            ceph_disk_name = ceph_pool_name + "/" + ceph_file_name
            ceph_client_name = params.get("ceph_client_name",
                                          "EXAMPLE_CLIENT_NAME")
            ceph_client_key = params.get("ceph_client_key",
                                         "EXAMPLE_CLIENT_KEY")
            ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER")
            ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Keep ceph_cfg blank so cleanup can tell whether a config file was created
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_host)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
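                    # No encode=True here: unlike a CHAP password, the
                    # ceph key read from params is already base64-encoded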
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_host, key_file)
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_host,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            if enable_auth:
                disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)
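                # Each incremental round copies only the blocks changed
                # since the previous checkpoint, so round N references
                # checkpoint_(N-1) via <incremental> in the backup XML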

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {"attrs": {}}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["attrs"]["file"] = target_file_path
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["attrs"]["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    logging.debug("target params: %s", target_params)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
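            # virsh backup-begin takes the backup job XML plus an optional
            # checkpoint XML, so the backup and its checkpoint are created
            # atomically in one call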

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
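                # ('rebase -u' is an unsafe/metadata-only rebase: it just
                # rewrites the backing-file pointer to empty so the image
                # can be compared as a standalone qcow2)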
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove local backup file
        if "target_file_path" in locals():
            if os.path.exists(target_file_path):
                os.remove(target_file_path)

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
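
# The backup loop above boils down to the following virsh flow (illustrative
# file names; the test builds the XML through utils_backup helpers):
#
#   virsh backup-begin <vm> backup.xml checkpoint.xml [--reuse-external]
#   virsh blockjob <vm> vdb        # poll until "no current block job"
#   virsh checkpoint-delete <vm> checkpoint_0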
Example #20
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Param dict used to get the encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = ("qemu-img create -f luks "
               "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
               "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: None; the test fails if the device is not in format 'fmt'.
        """
        cmd_result = process.run("qemu-img" + ' -h', ignore_status=True,
                                 shell=True, verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info, luks_encrypt_passwd is the password used to encrypt
    # luks image, and luks_secret_passwd is the password set to luks secret, you
    # can set a wrong password to luks_secret_passwd for negative tests
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source) and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {"sec_usage": "iscsi",
                                     "sec_target": auth_sec_usage}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                           encode=True, debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        chap_user=chap_user, chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_uuid": auth_sec_uuid}
                    elif use_auth_usage:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_usage": auth_sec_usage_target}
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name)
            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Keep ceph_cfg blank so cleanup can tell whether a config file was created
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                     "sec_name": "ceph_auth_secret"}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {"auth_user": ceph_auth_user,
                                      "secret_type": auth_sec_usage_type,
                                      "secret_uuid": auth_sec_uuid}
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                                   ceph_mon_ip,
                                                                   key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                     encode=True, debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {"encryption": "luks",
                                "secret": {"type": "passphrase",
                                           "uuid": luks_sec_uuid}}
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" % str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Example #21
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """

    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            raise error.TestFail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+\.\d+\s\S+)\s+(\d+\.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K',
               'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K',
               'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M',
               'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G',
               'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T',
               'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T'
               }
        val = {'B': 1,
               'K': 1024,
               'M': 1048576,
               'G': 1073741824,
               'T': 1099511627776
               }
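        # Worked example: '1.00 GiB' from vol-list parses to mem_value=1.0
        # and norm=val[des['GiB']]=1073741824, i.e. 1073741824 bytes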

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value in"
                                 " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value "
                                 "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key mismatch for volume: %s\n"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key,
                          vol_key.stdout.strip())
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name.stdout.strip())
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format from against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # The 'default' format is converted to a concrete value (qcow),
            # so just log whatever is reported here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    logging.debug("Volume encryption secret %s matches the"
                                  " expected value", secret_uuid)
            else:
                # If no encryption secret is specified, one is generated
                # automatically at volume creation time
                logging.debug("Auto-generated volume encryption secret"
                              " is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = params.get('delta_size', "1024")
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return all exist libvirt secrets uuid in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
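        # Skip the two header lines of 'virsh secret-list' output; the
        # first whitespace-separated field of each data line is the UUID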
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid start pool fail(For fs like pool, the new add
    # disk may in use by device-mapper, so start pool will report disk already
    # mounted error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get exists libvirt secrets before test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
                  "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
                  "vol_format:%s", pool_name, pool_type, pool_target,
                  vol_name, vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Creates volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr,
                                                               source_name,
                                                               volume_name)
                utils.run("qemu-img create -f %s %s %s" % (vol_format,
                                                           expected_vol['path'],
                                                           capacity))
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            raise error.TestFail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image, source_name=source_name)
        except error.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
Example #22
def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True):
    """
    Setup vm ceph disk with given parameters

    :param vm: the vm object
    :param params: dict, dict include setup vm disk xml configurations
    :param is_setup: one parameter indicate whether setup or clean up
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    logging.debug("original xml is: %s", vmxml)

    # Device related configurations
    device_format = params.get("virt_disk_device_format", "raw")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    hotplug = "yes" == params.get("virt_disk_device_hotplug", "no")
    keep_raw_image_as = "yes" == params.get("keep_raw_image_as", "no")

    # Ceph related configurations
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    auth_sec_usage_type = params.get("ceph_auth_sec_usage_type", "ceph")
    storage_size = params.get("storage_size", "1G")
    img_file = params.get("ceph_image_file")
    attach_option = params.get("virt_device_attach_option", "--live")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = ""
    is_local_img_file = img_file is None
    rbd_key_file = None

    # Keep ceph_cfg blank so cleanup can tell whether the config file was created
    ceph_cfg = ""
    disk_auth_dict = None
    auth_sec_uuid = None
    names = ceph_disk_name.split('/')
    pool_name = names[0]
    image_name = names[1]
    if not utils_package.package_install(["ceph-common"]):
        raise exceptions.TestError("Failed to install ceph-common")

    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(ceph_mon_ip)
    # If enable auth, prepare a local file to save key
    if ceph_client_name and ceph_client_key:
        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" %
                    (ceph_client_name, ceph_client_key))
        key_opt = "--keyring %s" % key_file
        rbd_key_file = key_file
    if is_setup:
        # If enable auth, prepare disk auth
        if ceph_client_name and ceph_client_key:
            auth_sec_uuid = _create_secret(auth_sec_usage_type, ceph_auth_key)
            disk_auth_dict = {"auth_user": ceph_auth_user,
                              "secret_type": auth_sec_usage_type,
                              "secret_uuid": auth_sec_uuid}
        # clean up image file if exists
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)

        # Create the image file if it does not exist
        _create_image(device_format, img_file, vm.name, storage_size,
                      ceph_disk_name, ceph_mon_ip, key_opt,
                      ceph_auth_user, ceph_auth_key)

        # Disk related config
        disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": ceph_disk_name},
                         "hosts":  [{"name": ceph_mon_ip,
                                     "port": ceph_host_port}]}
        # Create network disk
        disk_xml = libvirt_disk.create_primitive_disk_xml("network", device, device_target, device_bus,
                                                          device_format, disk_src_dict, disk_auth_dict)
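        # The resulting disk xml is roughly as follows (illustrative
        # values; pool/image, host and uuid come from the params above):
        #   <disk type='network' device='disk'>
        #     <driver name='qemu' type='raw'/>
        #     <source protocol='rbd' name='pool/image'>
        #       <host name='MON_IP' port='PORT'/>
        #     </source>
        #     <auth username='USER'>
        #       <secret type='ceph' uuid='SECRET_UUID'/>
        #     </auth>
        #     <target dev='vdb' bus='virtio'/>
        #   </disk>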
        if not keep_raw_image_as:
            if hotplug:
                virsh.attach_device(vm.name, disk_xml.xml,
                                    flagstr=attach_option, ignore_status=False, debug=True)
            else:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
                vmxml.add_device(disk_xml)
                vmxml.sync()
    else:
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)
        # Remove ceph config and key file if created.
        for file_path in [ceph_cfg, key_file]:
            if os.path.exists(file_path):
                os.remove(file_path)
        if is_local_img_file and img_file and os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
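
# The helpers _create_secret() and _create_image() used above are defined
# earlier in this listing. As a rough sketch of what the secret helper is
# expected to do (hypothetical name and body; the real helper may differ),
# it defines a ceph usage secret, sets its value, and returns the uuid
# consumed by the disk auth xml:
def _create_secret_sketch(auth_sec_usage_type, ceph_auth_key):
    # Define the secret from a usage dict, then store the ceph key in it
    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                     "sec_name": "ceph_auth_secret"}
    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                           ignore_status=False, debug=True)
    return auth_sec_uuid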
Example #23
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes (vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """
    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            test.fail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail
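
    # For reference, `virsh vol-list <pool> --details` lines matched by
    # the regex above look roughly like this (illustrative values):
    #   vol_0  /var/tmp/pool_target/vol_0  file  1.00 MiB  1.00 MiB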

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {
            'B': 'B',
            'bytes': 'B',
            'b': 'B',
            'kib': 'K',
            'KiB': 'K',
            'K': 'K',
            'k': 'K',
            'KB': 'K',
            'mib': 'M',
            'MiB': 'M',
            'M': 'M',
            'm': 'M',
            'MB': 'M',
            'gib': 'G',
            'GiB': 'G',
            'G': 'G',
            'g': 'G',
            'GB': 'G',
            'Gb': 'G',
            'tib': 'T',
            'TiB': 'T',
            'TB': 'T',
            'T': 'T',
            't': 'T'
        }
        val = {
            'B': 1,
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            test.fail("Error in parsing capacity value in" " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            test.fail("Error in parsing capacity value in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity
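
    # Worked example of the normalization above (hypothetical values):
    # a vol-list capacity of "1.00 GiB" and a vol-info capacity of
    # "1024.00 MiB" map to 1 * 1073741824 and 1024 * 1048576 bytes
    # respectively, i.e. the same 1073741824, so all four capacity
    # sources can be compared with a single byte-based delta.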

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(
            expected['name'], expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error(
                "Volume key mismatch for volume: %s\n"
                "Key from xml: %s\nKey from command: %s", expected['name'],
                volume_xml.key, vol_key.stdout.strip())
            error_count += 1
        else:
            logging.debug(
                "virsh vol-key for volume: %s successfully"
                " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error(
                "Volume name mismatch\n"
                "Expected name: %s\nOutput of vol-name: %s", expected['name'],
                get_vol_name.stdout.strip())
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected path: %s\nOutput of vol-path: %s\n",
                expected['name'], expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-path for volume: %s successfully checked"
                " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error(
                "Volume path mismatch for volume:%s\n"
                "Expected Path: %s\nPath from virsh vol-list: %s",
                expected['name'], expected['path'], actual_list['path'])
            error_count += 1
        else:
            logging.debug(
                "Path of volume: %s from virsh vol-list "
                "successfully checked against created "
                "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug(
                "Path of volume: %s from virsh vol-dumpxml "
                "successfully checked against created volume path",
                expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-list: %s", expected['name'],
                expected['type'], actual_list['type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-list "
                "successfully checked against the created "
                "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-info: %s", expected['name'],
                expected['type'], actual_info['Type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-info successfully"
                " checked against the created volume type", expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error(
                "Volume name mismatch for volume: %s\n"
                "Expected name: %s\n Name from vol-info: %s", expected['name'],
                expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug(
                "Name of volume: %s from virsh vol-info successfully"
                " checked against the created volume name", expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from qemu-img info: %s", expected['name'],
                    expected['format'], img_info['format'])
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from qemu-img info "
                    "checked successfully against the created "
                    "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from vol-dumpxml: %s", expected['name'],
                    expected['format'], volume_xml.format)
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from virsh vol-dumpxml "
                    "checked successfully against the created"
                    " volume format", expected['name'])

        logging.info("Expected encryption format: %s",
                     expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # The 'default' format is resolved to a concrete value
            # (e.g. qcow) at creation time, so just log it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # Also log the encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error(
                        "Encryption secret mismatch for volume: %s\n"
                        "Expected secret uuid: %s\n"
                        "Secret uuid from vol-dumpxml: %s", expected['name'],
                        expected['encrypt_secret'], secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value is set, one is
                    # generated automatically at volume creation time
                    logging.debug("Volume encryption secret is %s",
                                  secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error(
                "Pool name mismatch for volume: %s against "
                "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug(
                "Pool name of volume: %s checked successfully"
                " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-list\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['list'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['info'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['xml'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-dumpxml for volume: %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against "
                "qemu-img info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " qemu-img info for volume: %s", expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return the uuids of all existing libvirt secrets in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list
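
    # For reference, `virsh secret-list` output parsed above looks
    # roughly like this (illustrative uuid); the two header lines are
    # skipped before the uuid column is collected:
    #
    #  UUID                                  Usage
    # ---------------------------------------------------------------
    #  0bd0f9d9-...                          volume /var/lib/...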

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        test.error("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        test.error("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool start failures (for fs-like pools the
    # newly added disk may be held by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Record existing libvirt secrets before the test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug(
        "Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
        "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
        "vol_format:%s", pool_name, pool_type, pool_target, vol_name,
        vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Create the volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(
                        **encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (
                    ip_addr, source_name, volume_name)
                process.run("qemu-img create -f %s %s %s" %
                            (vol_format, expected_vol['path'], capacity),
                            shell=True)
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            test.fail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name,
                                  pool_type,
                                  pool_target,
                                  emulated_image,
                                  source_name=source_name)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
Example #24
def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: if True, skip the check (default False).
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up leftover secrets in the test environment, if any.
        libvirt_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            if blockcopy_option in ['reuse_external']:
                device_source = libvirt.create_local_disk(
                    backend_storage_type, disk_path, storage_size,
                    device_format)
            else:
                device_source = disk_path
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            checkout_device_source = image_filename
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
                checkout_device_source = device_source
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
                checkout_device_source = 'emulated-iscsi'
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            size = "0.15"

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with an empty value so cleanup can tell whether the
            # ceph config file was actually created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If auth is enabled, prepare a local keyring file to save the key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       ignore_status=False,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            if blockcopy_option in ['reuse_external']:
                # Create a local image file.
                libvirt.create_local_disk("file", img_file, storage_size,
                                          device_format)
                # Convert the image to remote storage
                disk_path = ("rbd:%s:mon_host=%s" %
                             (ceph_disk_name, ceph_mon_ip))
                if ceph_client_name and ceph_client_key:
                    disk_path += (":id=%s:key=%s" %
                                  (ceph_auth_user, ceph_auth_key))
                rbd_cmd = (
                    "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                    " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                   device_format, img_file, disk_path))
                process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
            checkout_device_source = ceph_disk_name
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = socket.gethostname().strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)
            checkout_device_source = image_path
            if blockcopy_option in ['pivot']:
                ignore_check = True

        logging.debug("device source is: %s", device_source)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        auth_in_source = True
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name,
                                     tmp_device_source,
                                     device_target,
                                     "--config",
                                     debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Run virsh blockcopy with the prepared disk xml
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options,
                                 debug=True)
        libvirt.check_exit_status(result)
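        # The call above corresponds roughly to this command line
        # (illustrative; target and options come from the params above):
        #   virsh blockcopy <vm_name> vdd --xml <disk_xml_file> \
        #       --pivot --transient-job --verbose --wait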
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the vm first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Clean up nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
Example #25
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare pool, volume.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name. Pool name.
        :param p_type. Pool type.
        :param p_target. Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name. Pool name.
        :param target_encrypt_params. Encryption parameters in dict.
        :param vol_params. Volume parameters dict.
        :return: True if create successfully.
        """
        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path. volume path.
        :return: secret id if create successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid
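
    # Worked example of the encoding above: with the default
    # secret_password_no_encoded of "redhat" and a UTF-8 locale,
    # base64.b64encode("redhat".encode()).decode() yields 'cmVkaGF0',
    # which is the string passed to `virsh secret-set-value`.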

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded", "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update({"secret": {"type": secret_type, "uuid": sec_encryption_uuid}})
        try:
            # On libvirt versions lower than 2.5.0 creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume."
                           "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {"encryption": v_xml.encryption.format,
                               "secret": {"type": v_xml.encryption.secret["type"],
                                          "uuid": v_xml.encryption.secret["uuid"]}}
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                        **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                        **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that a LUKS-encrypted virtual disk without <encryption>
            # can still be started or attached normally, since qemu just
            # treats it as RAW, so LUKS is not tested with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but did not. "
                              "Error: %s" % str(e))
                else:
                    logging.debug("VM failed to start as expected. "
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2
                # images are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start. "
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
Example #27
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, even swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported " "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_data_dir(),
                                      "uefi_disk.qcow2")

    # Tpm emulator tpm-tis_model for aarch64 supported since libvirt 7.1.0
    if (platform.machine() == 'aarch64' and tpm_model == 'tpm-tis'
            and backend_type == 'emulator'
            and not libvirt_version.version_compare(7, 1, 0)):
        test.cancel("Tpm emulator tpm-tis_model for aarch64 "
                    "is not supported on current libvirt")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled",
                                dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    if tpm_v == "1.2":
                        test.error("Failed to install tpm-tools on host")
                    else:
                        logging.debug("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install(["swtpm", "swtpm-tools"]):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {
            "loader_readonly": "yes",
            "secure": "yes",
            "loader_type": "pflash",
            "loader": loader,
            "nvram": nvram
        }
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" %
                            (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)
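
    # Illustrative only: after replace_os_disk() the inactive xml is expected
    # to carry an <os> section roughly like the sketch below (the loader path
    # is an assumption; the real value comes from the 'loader' cfg param):
    #   <os>
    #     <loader readonly="yes" secure="yes" type="pflash">/usr/share/OVMF/OVMF_CODE.secboot.fd</loader>
    #     <nvram>/var/lib/libvirt/qemu/nvram/<VM_NAME>_VARS.fd</nvram>
    #   </os>
    # plus <smm state="on"/> under <features>.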

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                        vm2_name,
                                                        True,
                                                        timeout=360,
                                                        debug=True)
            if ret_clone.exit_status:
                test.error(
                    "Need more than one domains, but error occured when virt-clone."
                )
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail(
                    "Can not find the %s backend version xml for tpm dev "
                    "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')
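
    # For reference, the fragments matched above come from a dumpxml entry
    # shaped roughly like this (uuid is a placeholder):
    #   <tpm model="tpm-crb">
    #     <backend type="emulator" version="2.0">
    #       <encryption secret="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" />
    #     </backend>
    #   </tpm>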

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append(
                "-chardev.socket,id=chrtpm,"
                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" %
                (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" %
                          pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail(
                'swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = [
            "--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"
        ]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = [
            "/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)
        ]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" %
                         (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(
            cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough
        Since dmesg sometimes lacks tpm version info, use tpm-tools or
        tpm2-tools to probe the device and confirm the real version.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # tpm_v got nothing from dmesg; log the failure here and
                    # fall through to the next 'if' to try tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd,
                               ignore_status=True).exit_status:
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version: the host version for
                               passthrough, or the emulator backend version
        :param session: Guest session to be tested
        :param expect_fail: whether the guest tpm is expected to fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output(
                    "systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel(
                            "tpm-crb passthrough only works with host tpm2.0, "
                            "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output(
                    "ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(
                    tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        # Compare numerically; float("5.10") would wrongly sort below 5.3
        if tuple(int(x) for x in boot_info[:2]) < (5, 3):
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version,
                                                file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build the test suite
        if not utils_package.package_install(
            ["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output(
            "tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # The suite's scripts invoke python; rewrite them to call python2 explicitly
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output(
            "make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is in use by the first.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail(
                "Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s",
                                  tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {
                            "sec_ephemeral": "no",
                            "sec_private": "yes",
                            "sec_desc": "sample vTPM secret",
                            "sec_usage": "vtpm",
                            "sec_name": "VTPM_example"
                        }
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(
                                auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name,
                                                                vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For the default model, no need to start the guest to test
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure the OS works before vm operations or libvirtd restart
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(
                        vm_name, "sp1 --memspec file=/tmp/testvm_sp1",
                        **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name,
                                       options="--nvram",
                                       **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail(
                                "Swtpm state dir: %s still exist after vm undefine"
                                % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(
                                5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params,
                                                   vm.root_dir,
                                                   vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid,
                                                   "new sesame",
                                                   encode=True,
                                                   debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or flip the encryption state: from plain to encrypted, or vice versa
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug(
                                    "The new tpm dev xml to add for restart vm is:\n %s",
                                    tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # define() instead of sync(): sync() undefines first, which would remove the swtpm state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name,
                                          ignore_status=True,
                                          debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid,
                                               "new sesame",
                                               encode=True,
                                               debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name,
                                    "--nvram --remove-all-storage",
                                    debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                              vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm2.name)
def run(test, params, env):
    """
    Test vol-download and vol-upload

    Basic steps are:
    1. Create pool with type defined in cfg
    2. Create image and write data into it
    3. Get md5 value before operation
    4. Do vol-download/upload with options (offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to the file
            write_file(file_path)

            # Set length so that offset + length can be computed in the
            # helpers get_pre_post_digest() and digest() below
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre-region and post-region digests when offset and length are given
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to the volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            test.fail("Fail to %s volume: %s" %
                      (operation, result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            test.fail("file digests do not match, volume %s failed"
                      % operation)

    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
Exemplo n.º 29
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Dict from which to get the encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = (
            "qemu-img create -f luks "
            "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
            "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: None; fails the test if the device format does not match fmt.
        """
        cmd_result = process.run("qemu-img" + ' -h',
                                 ignore_status=True,
                                 shell=True,
                                 verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))
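
    # For a luks-formatted device, "qemu-img info" prints a line like
    #   file format: luks
    # which is what the grep pipeline above keys on.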

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def get_secret_list():
        """
        Get secret list.

        :return secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = secret_list_result.stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace, assume 1 column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret. Set a wrong luks_secret_passwd for negative tests.
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None
    duplicated_encryption = "yes" == params.get("duplicated_encryption", "no")

    if ((encryption_in_source or auth_in_source)
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Clean up dirty secrets in the test environment, if any exist.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Create secret
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid,
                                     luks_secret_passwd,
                                     encode=True,
                                     debug=True)
        libvirt.check_exit_status(ret)
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {
                        "sec_usage": "iscsi",
                        "sec_target": auth_sec_usage
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid,
                                           chap_passwd,
                                           encode=True,
                                           debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        chap_user=chap_user,
                        chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_uuid": auth_sec_uuid
                        }
                    elif use_auth_usage:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_usage": auth_sec_usage_target
                        }
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with a blank ceph config path so cleanup can tell whether
            # a config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid
                    }
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                        ceph_mon_ip)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = nfs_mount_dir + image_name
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        # Create dir based pool,and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encrypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {
                "name": volume_name,
                "capacity": int(volume_cap),
                "allocation": int(volume_alloc),
                "format": volume_target_format,
                "path": volume_target_path,
                "label": volume_target_label,
                "capacity_unit": volume_cap_unit
            }
            vol_encryption_params = {}
            vol_encryption_params.update({"format": "luks"})
            vol_encryption_params.update(
                {"secret": {
                    "type": "passphrase",
                    "uuid": luks_sec_uuid
                }})
            try:
                # Creating a luks encryption volume is not supported on
                # libvirt versions below 2.5.0, so skip it there.
                create_vol(pool_name, vol_encryption_params, vol_params)
            except AssertionError as info:
                err_msgs = "create: invalid option"
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        if backend_storage_type != "dir":
            encrypt_dev(device_source, params)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {
            "encryption": "luks",
            "secret": {
                "type": "passphrase",
                "uuid": luks_sec_uuid
            }
        }
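        # This dict maps to libvirt disk XML of roughly this shape
        # (a sketch; the uuid is the LUKS secret created earlier):
        #   <encryption format='luks'>
        #     <secret type='passphrase' uuid='...'/>
        #   </encryption>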
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        if duplicated_encryption:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Example No. 30
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False

    if virsh.has_command_help_match("vol-clone", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata "
                        "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                luks_sec_uuid = create_luks_secret(os.path.join(pool_target,
                                                                vol_name),
                                                   encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              clone_result.stdout.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run"
                          " successfully.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)

        except exceptions.TestFail as detail:
            logging.error(str(detail))
Example No. 31
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add volume shrink test after libvirt uptream support it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    if not libvirt_version.version_compare(1, 0, 0):
        if "--allocate" in resize_option:
            test.cancel("'--allocate' flag is not supported in"
                        " current libvirt version.")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # Raise an error if a pool with the given name already exists
        if libv_pool.pool_exists(pool_name):
            test.error("Pool '%s' already exist", pool_name)
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name,
                              pool_type,
                              pool_target,
                              emulated_image,
                              image_size=emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity, '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]

        # Create a volume
        if b_luks_encrypt:
            luks_sec_uuid = create_luks_secret(
                os.path.join(pool_target, vol_name), test)
            secret_uuids.append(luks_sec_uuid)
            set_secret_value(encryption_password, luks_sec_uuid)
            create_luks_vol(vol_name, luks_sec_uuid, params, test)
        else:
            libv_pvt.pre_vol(vol_name=vol_name,
                             vol_format=vol_format,
                             capacity=vol_capacity,
                             allocation=None,
                             pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name, test)

        # The volume size may not be exactly what we expect after resize:
        # 1) vol_new_capacity = 1b with the --delta option: the volume size
        #    will not change
        # 2) vol_new_capacity = 1KB with the --delta option: the volume size
        #    will increase by 1024 bytes, not 1000
        # So the volume size check can be disabled after resize
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path, test,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name,
                                  vol_new_capacity,
                                  pool_name,
                                  resize_option,
                                  uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                test.fail(result.stdout.strip() + result.stderr.strip())
            else:
                if check_vol_info(libv_vol, vol_name, test, expect_info):
                    logging.debug("Volume %s resize check pass.", vol_name)
                else:
                    test.fail("Volume %s resize check fail." % vol_name)
        elif result.exit_status == 0:
            test.fail("Expect resize fail but run successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
Example No. 32
    cmd_result = virsh.secret_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
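    # 'virsh secret-list' prints a table whose first row is the header,
    # roughly (a sketch):
    #     UUID                                  Usage
    #     ----------------------------------------------------------
    #     0e912cc6-...                          volume /path/to/volume
    # so findall()[1][0] below skips the header pair and picks the uuid.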
    try:
        uuid = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                          str(cmd_result.stdout))[1][0]
    except IndexError:
        raise error.TestError("Fail to get secret uuid")

    if uuid:
        try:
            virsh.secret_dumpxml(uuid, to_file=file, **virsh_dargs)
        except error.CmdError as e:
            raise error.TestError(str(e))

    virsh.secret_undefine(uuid, ignore_status=True)
    if os.path.exists(volume_path):
        os.unlink(volume_path)
    if os.path.exists(secret_obj_xmlfile):
        os.unlink(secret_obj_xmlfile)


def interface_validate(file=None, **virsh_dargs):
    """
    Test for schema interface
    """
    cmd_result = virsh.iface_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        iface_name = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                                str(cmd_result.stdout))[1][0]
Example No. 33
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata " "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and not libvirt_version.version_compare(
                        6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name), encryption_password,
                    test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                unsupported_err = [
                    "Unsupported algorithm", "no such pattern sequence"
                ]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run" " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name,
                                         pool_name,
                                         alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri,
                                         debug=True)
            unsupported_err = [
                "Unsupported algorithm", "no such pattern sequence"
            ]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              clone_result.stdout.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s", key,
                                      virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s", key,
                                      qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run" " successfully.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)

        except exceptions.TestFail as detail:
            logging.error(str(detail))
Example No. 34
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. check the full/incremental backup file data
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("local_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {
                                "type": "passphrase",
                                "uuid": luks_secret_uuid
                            }
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_export": nbd_export_name
            }
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(
                        nbd_params, backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if the scratch file is encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except (utils_backup.BackupBeginError,
            utils_backup.BackupTLSError) as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        elif tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)

        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()

        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
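# --- Illustrative sketch (hedged), not part of the tests above or below:
# utils_backup.cmp_backup_data is assumed to compare image payloads roughly
# like this, converting both images to raw and hashing the result. The helper
# name _images_identical and the tmp_dir default are hypothetical.
def _images_identical(image_a, image_b, tmp_dir="/tmp"):
    import hashlib
    import os
    from avocado.utils import process

    digests = []
    for image in (image_a, image_b):
        raw_path = os.path.join(tmp_dir, os.path.basename(image) + ".raw")
        # qemu-img convert normalizes the container format, so only the
        # payload contributes to the hash
        process.run("qemu-img convert -O raw %s %s" % (image, raw_path),
                    shell=True)
        with open(raw_path, "rb") as raw_file:
            digests.append(hashlib.sha256(raw_file.read()).hexdigest())
        os.remove(raw_path)
    return digests[0] == digests[1]
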
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --disk-only --diskspec vda,"
            "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --memspec file=%s,snapshot=external"
            " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Original partitions in VM before the disk attach.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' " "in command line" % cmd)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(
                    3, 9, 0):
                test.cancel(
                    "place auth in source is not supported in current libvirt version"
                )
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            # "virsh secret-define" prints "Secret <uuid> created"; the regex
            # extracts the uuid field from that message.
            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk first.
        if device_format == "qcow2":
            cmd = (
                "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" %
                (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set in the
        # controller rather than in the disk driver.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
        if driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_uuid": uuid
            }
        elif auth_usage:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_usage": secret_usage_target
            }
        disk_source = disk_xml.new_disk_source(
            **{
                "attrs": {
                    "protocol": "iscsi",
                    "name": "%s/%s" % (iscsi_target, lun_num)
                },
                "hosts": [{
                    "name": iscsi_host,
                    "port": iscsi_port
                }]
            })
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update(
                        {addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, the iothread attribute needs to be
            # added to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start." "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
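# --- Illustrative sketch (hedged): how the test above prepares a CHAP
# password for `virsh secret-set-value`, which expects the value
# base64-encoded. The helper name _encode_secret_value is hypothetical.
def _encode_secret_value(password):
    import base64
    import locale

    # Encode with the locale's preferred encoding, then base64 the bytes
    encoding = locale.getpreferredencoding()
    return base64.b64encode(password.encode(encoding)).decode(encoding)
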
def run(test, params, env):
    """
    Test host scsi device passthrough
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_hostdev_xml(**kwargs):
        """
        Prepare the scsi device's xml

        :param kwargs: The arguments to generate scsi host device xml.
        :return: The xml of the scsi host device.
        """
        hostdev_xml = hostdev.Hostdev()
        hostdev_xml.type = "scsi"
        if kwargs.get("managed"):
            hostdev_xml.managed = kwargs.get("managed")
        hostdev_xml.mode = kwargs.get("mode", "subsystem")
        if kwargs.get("sgio"):
            hostdev_xml.sgio = kwargs.get("sgio")
        if kwargs.get("rawio"):
            hostdev_xml.rawio = kwargs.get("rawio")
        hostdev_xml.readonly = "yes" == kwargs.get("readonly")
        hostdev_xml.shareable = "yes" == kwargs.get("shareable")

        source_args = {}
        source_protocol = kwargs.get("source_protocol")
        if source_protocol == "iscsi":
            # Use iscsi lun directly
            source_args['protocol'] = "iscsi"
            source_args['host_name'] = kwargs.get("iscsi_host", "ISCSI_HOST")
            source_args['host_port'] = kwargs.get("iscsi_port", "ISCSI_PORT")
            source_args['source_name'] = kwargs.get("iqn_name", "IQN_NAME")
            source_args['auth_user'] = kwargs.get("auth_user")
            source_args['secret_type'] = kwargs.get("secret_type")
            source_args['secret_uuid'] = kwargs.get("secret_uuid")
            source_args['secret_usage'] = kwargs.get("secret_usage")
            source_args['iqn_id'] = kwargs.get("iqn_id")
        elif source_protocol:
            test.cancel("We do not support source protocol = %s yet" %
                        source_protocol)
        else:
            # Use local scsi device
            source_args['adapter_name'] = kwargs.get("adapter_name",
                                                     "scsi_host999")
            source_args['bus'] = kwargs.get("addr_bus", "0")
            source_args['target'] = kwargs.get('addr_target', "0")
            source_args['unit'] = kwargs.get('addr_unit', "0")
        # If any attributes are unused, remove them from the source dict to
        # avoid an attr="" or attr="None" situation.
        for key, value in list(source_args.items()):
            if not value:
                source_args.pop(key)
        hostdev_xml.source = hostdev_xml.new_source(**source_args)
        logging.info("hostdev xml is: %s", hostdev_xml)
        return hostdev_xml

    def prepare_iscsi_lun(emulated_img="emulated-iscsi", img_size='1G'):
        """
        Prepare iscsi lun

        :param emulated_img: The name of the iscsi lun device.
        :param img_size: The size of the iscsi lun device.
        :return: The iscsi target and lun number.
        """
        enable_chap_auth = "yes" == params.get("enable_chap_auth")
        if enable_chap_auth:
            chap_user = params.get("chap_user", "redhat")
            chap_passwd = params.get("chap_passwd", "password")
        else:
            chap_user = ""
            chap_passwd = ""
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            emulated_image=emulated_img,
            image_size=img_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")
        return iscsi_target, lun_num

    def prepare_local_scsi(emulated_img="emulated-iscsi", img_size='1G'):
        """
        Prepare a local scsi device

        :param emulated_img: The name of the iscsi lun device.
        :param img_size: The size of the iscsi lun device.
        :return: The iscsi scsi/bus/target/unit number.
        """
        lun_info = []
        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            emulated_image=emulated_img,
            image_size=img_size)
        cmd = "targetcli ls"
        cmd_result = process.run(cmd, shell=True)
        logging.debug("new block device is: %s", device_source)
        cmd = "lsscsi | grep %s | awk '{print $1}'" % device_source
        cmd_result = process.run(cmd, shell=True)
        lun_info = re.findall(r"\d+", str(cmd_result.stdout.strip()))
        if len(lun_info) != 4:
            test.fail("Get wrong scsi lun info: %s" % lun_info)
        scsi_num = lun_info[0]
        bus_num = lun_info[1]
        target_num = lun_info[2]
        unit_num = lun_info[3]
        return scsi_num, bus_num, target_num, unit_num

    def get_new_disks(vm, old_partitions):
        """
        Get new disks in vm after hostdev plug.

        :param vm: The vm to be checked.
        :param old_partitions: Already existing partitions in vm.
        :return: New disks/partitions in vm, or None if no new disk/partitions.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                logging.debug("PPC machine may need a little sleep time "
                              "to see all disks, related owner may need "
                              "further investigation. Skip the sleep for now.")
                #time.sleep(10)
            new_partitions = utils_disk.get_parts_list(session)
            logging.debug("new partitions are: %s", new_partitions)
            added_partitions = list(
                set(new_partitions).difference(set(old_partitions)))
            session.close()
            if not added_partitions:
                logging.debug("No new partitions found in vm.")
            else:
                logging.debug("Newly added partition(s) is: %s",
                              added_partitions)
            return added_partitions
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when get new disk: %s" % str(err))

    def get_unpriv_sgio(scsi_dev):
        """
        Get scsi dev's unpriv_sgio value.

        :param scsi_dev: The scsi device to be checked.
        :return: The unpriv_sgio value of the scsi device.
        """
        cmd = "lsscsi -g | grep '\[%s\]'" % scsi_dev
        try:
            output = process.system_output(cmd, verbose=True, shell=True)
            blkdev = output.split()[-2]
            chardev = output.split()[-1]
            blk_stat = os.stat(blkdev)
            sg_stat = os.stat(chardev)
            blkdev_major = os.major(blk_stat.st_rdev)
            blkdev_minor = os.minor(blk_stat.st_rdev)
            chardev_major = os.major(sg_stat.st_rdev)
            chardev_minor = os.minor(sg_stat.st_rdev)
            blkdev_unpriv_path = ("/sys/dev/block/%s:%s/queue/unpriv_sgio" %
                                  (blkdev_major, blkdev_minor))
            chardev_unpriv_path = ("/sys/dev/char/%s:%s/device/unpriv_sgio" %
                                   (chardev_major, chardev_minor))
            # The unpriv_sgio feature changed in certain kernels; e.g. the
            # /sys/dev/block/<major>:<minor>/queue/unpriv_sgio node may not exist.
            if os.path.exists(blkdev_unpriv_path) is False:
                return
            with open(blkdev_unpriv_path, 'r') as f:
                blkdev_unpriv_value = f.read().strip()
            with open(chardev_unpriv_path, 'r') as f:
                chardev_unpriv_value = f.read().strip()
            logging.debug("blkdev unpriv_sgio:%s\nchardev unpriv_sgio:%s",
                          blkdev_unpriv_value, chardev_unpriv_value)
            if ((not blkdev_unpriv_value or not chardev_unpriv_value)
                    or (blkdev_unpriv_value != chardev_unpriv_value)):
                test.error("unpriv_sgio values are incorrect under block "
                           "and char folders.")
            return blkdev_unpriv_value
        except Exception as detail:
            test.fail(
                "Error happens when try to get the unpriv_sgio value: %s" %
                detail)

    def check_unpriv_sgio(scsi_dev, unpriv_sgio=False, shareable_dev=True):
        """
        Check device's unpriv_sgio value with provided boolean value.

        :param scsi_dev: The scsi device to be checked.
        :param unpriv_sgio: If the expected unpriv_sgio is True or False.
        :param shareable_dev: If the device is a shareable one.
        """
        scsi_unpriv_sgio = get_unpriv_sgio(scsi_dev)
        # On RHEL 9 the sysfs node may be absent and get_unpriv_sgio()
        # returns None; skip the check and return True directly in that case.
        if scsi_unpriv_sgio is None:
            return True
        if shareable_dev:
            # Only when <shareable/> set, the sgio takes effect.
            if ((unpriv_sgio and scsi_unpriv_sgio == '1')
                    or (not unpriv_sgio and scsi_unpriv_sgio == '0')):
                return True
        else:
            if scsi_unpriv_sgio == '0':
                return True
        return False

    def check_disk_io(vm, partition):
        """
        Check if the disk partition in vm can be normally used.

        :param vm: The vm to be checked.
        :param partition: The disk partition in vm to be checked.
        :return: If the disk can be used, return True.
        """
        readonly = "yes" == params.get("readonly")
        readonly_keywords = ['readonly', 'read-only', 'read only']
        try:
            session = vm.wait_for_login()
            cmd = (
                "fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                "mkdir -p {0} && mount /dev/{0} {0} && echo"
                " teststring > {0}/testfile && umount {0}".format(partition))
            status, output = session.cmd_status_output(cmd)
            session.close()
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            if readonly:
                for ro_kw in readonly_keywords:
                    if ro_kw in str(output).lower():
                        return True
                logging.error("Hostdev set with 'readonly'. "
                              "But still can be operated.")
                return False
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check disk io in vm: %s",
                          str(err))
            return False

    def ppc_controller_update(vmxml):
        """
        Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type
        :return:
        """
        device_bus = 'scsi'
        if params.get('machine_type') == 'pseries':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()

    coldplug = "cold_plug" == params.get("attach_method")
    hotplug = "hot_plug" == params.get("attach_method")
    status_error = "yes" == params.get("status_error")
    use_iscsi_directly = "iscsi" == params.get("source_protocol")
    sgio = params.get("sgio")
    test_shareable = "yes" == params.get("shareable")
    device_num = int(params.get("device_num", "1"))
    new_disks = []
    new_disk = ""
    attach_options = ""
    iscsi_target = ""
    lun_num = ""
    adapter_name = ""
    addr_scsi = ""
    addr_bus = ""
    addr_target = ""
    addr_unit = ""
    auth_sec_uuid = ""
    hostdev_xmls = []

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    enable_initiator_set = "yes" == params.get("enable_initiator_set", "no")
    if enable_initiator_set and not libvirt_version.version_compare(6, 7, 0):
        test.cancel(
            "current version doesn't support iscsi initiator hostdev feature")
    try:
        # Load sg module if necessary
        process.run("modprobe sg",
                    shell=True,
                    ignore_status=True,
                    verbose=True)
        # Backup vms' xml
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml = vmxml_backup.copy()
        ppc_controller_update(vmxml)
        if test_shareable:
            vm_names = params.get("vms").split()
            if len(vm_names) < 2:
                test.error("At least 2 vms should be prepared "
                           "for shareable test.")
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml = vm2_xml_backup.copy()
            ppc_controller_update(vm2_xml)
            if vm2.is_dead():
                vm2.start()
                session = vm2.wait_for_login()
                vm2_old_partitions = utils_disk.get_parts_list(session)
                session.close()

        # Get disk partitions info before hot/cold plug virtual disk
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        old_partitions = utils_disk.get_parts_list(session)
        session.close()
        for dev_num in range(device_num):
            if use_iscsi_directly:
                iscsi_target, lun_num = prepare_iscsi_lun(emulated_img='img' +
                                                          str(dev_num))
                params['iscsi_host'] = "127.0.0.1"
                params['iscsi_port'] = "3260"
                params['iqn_name'] = iscsi_target + "/" + lun_num
            else:
                addr_scsi, addr_bus, addr_target, addr_unit = prepare_local_scsi(
                    emulated_img='img' + str(dev_num))
                if not params.get('adapter_name') or dev_num >= 1:
                    params['adapter_name'] = "scsi_host" + addr_scsi
                params['addr_bus'] = addr_bus
                params['addr_target'] = addr_target
                params['addr_unit'] = addr_unit
                lsscsi_keyword = (addr_scsi + ":" + addr_bus + ":" +
                                  addr_target + ":" + addr_unit)

            enable_chap_auth = "yes" == params.get("enable_chap_auth")
            auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
            if enable_chap_auth:
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_password", "password")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                params['auth_user'] = chap_user
                params['secret_type'] = "iscsi"
                params['secret_uuid'] = auth_sec_uuid

            if enable_initiator_set:
                params['iqn_id'] = iscsi_target
            hostdev_xml = prepare_hostdev_xml(**params)
            hostdev_xmls.append(hostdev_xml)

        if coldplug:
            attach_options = "--config"
        # Attach virtual disk to vm
        for dev_num in range(device_num):
            result = virsh.attach_device(vm_name,
                                         hostdev_xmls[dev_num].xml,
                                         flagstr=attach_options,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error & hotplug)
        if coldplug:
            vm.destroy(gracefully=False)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error & coldplug)
        if not status_error:
            vm.wait_for_login().close()
            # Here we may need to wait for some time; update if the issue
            # happens again.
            #time.sleep(10)
            utils_misc.wait_for(lambda: get_new_disks(vm, old_partitions), 20)
            new_disks = get_new_disks(vm, old_partitions)
            if len(new_disks) != device_num:
                test.fail("Attached %s virtual disk but got %s." %
                          (device_num, len(new_disks)))
            new_disk = new_disks[0]
            for new_disk in new_disks:
                # Check disk io of the hostdev in vm.
                if not check_disk_io(vm, new_disk):
                    test.fail("Got unexpected result when operate the newly "
                              "added disk in vm.")

                # Check if the unpriv_sgio value is correctly set by the xml
                # sgio param.
                if not use_iscsi_directly:
                    if sgio == "unfiltered":
                        unpriv_sgio = True
                    else:
                        unpriv_sgio = False
                    if not (check_unpriv_sgio(lsscsi_keyword, unpriv_sgio,
                                              test_shareable)):
                        test.fail(
                            "SCSI dev's unpriv_sgio value is inconsistent with "
                            "hostdev xml's sgio value.")

                # Check shareable device.
                if test_shareable:
                    vm2_xml.add_device(hostdev_xml)
                    session = vm2.wait_for_login()
                    result = virsh.attach_device(vm2_name,
                                                 hostdev_xml.xml,
                                                 ignore_status=False,
                                                 debug=True)
                    utils_misc.wait_for(
                        lambda: get_new_disks(vm2, vm2_old_partitions), 20)
                    vm2_new_disks = get_new_disks(vm2, vm2_old_partitions)
                    if len(vm2_new_disks) != 1:
                        test.fail(
                            "In second vm, attached 1 virtual disk but got %s."
                            % len(vm2_new_disks))
                    vm2_new_disk = vm2_new_disks[0]
                    if not check_disk_io(vm2, vm2_new_disk):
                        test.fail(
                            "Testing shareable device, got unexpected result "
                            "when operate the newly added disk in the second vm."
                        )

            # Detach the disk from vm.
            for dev_num in range(device_num):
                result = virsh.detach_device(vm_name,
                                             hostdev_xmls[dev_num].xml,
                                             flagstr=attach_options,
                                             ignore_status=False,
                                             debug=True)

            # Check the detached disk in vm.
            if coldplug:
                vm.destroy(gracefully=False)
                vm.start()
                vm.wait_for_login().close()
            utils_misc.wait_for(lambda: not get_new_disks(vm, old_partitions),
                                20)
            new_disks = get_new_disks(vm, old_partitions)
            if len(new_disks) != 0:
                test.fail("Unplug virtual disk failed.")
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
        if test_shareable:
            if vm2.is_alive():
                vm2.destroy(gracefully=False)
            vm2_xml_backup.sync()
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        for dev_num in range(device_num):
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image='img' + str(dev_num))
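
# --- Illustrative sketch (hedged): resolving a block device to its
# /sys/dev/block/<major>:<minor>/queue/unpriv_sgio node, as done by
# get_unpriv_sgio() in the test above. The helper name _read_unpriv_sgio
# is hypothetical.
def _read_unpriv_sgio(blkdev):
    import os

    dev_stat = os.stat(blkdev)
    major = os.major(dev_stat.st_rdev)
    minor = os.minor(dev_stat.st_rdev)
    sysfs_path = "/sys/dev/block/%s:%s/queue/unpriv_sgio" % (major, minor)
    if not os.path.exists(sysfs_path):
        # Newer kernels dropped the unpriv_sgio node entirely
        return None
    with open(sysfs_path) as sysfs_file:
        return sysfs_file.read().strip()
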
def run(test, params, env):
    """
    Test SCSI3 Persistent Reservation functions.

    1.Prepare iscsi backend storage.
    2.Prepare disk xml.
    3.Hot/cold plug the disk to vm.
    4.Check if SCSI3 Persistent Reservation commands can be issued to that disk.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def get_delta_parts(vm, old_parts):
        """
        Get the newly added partitions/blockdevs in vm.
        :param vm: The vm to be operated.
        :param old_parts: The original partitions/blockdevs in vm.
        :return: Newly added partitions/blockdevs.
        """
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        new_parts = list(set(new_parts).difference(set(old_parts)))
        session.close()
        return new_parts

    def check_pr_cmds(vm, blk_dev):
        """
        Check if SCSI3 Persistent Reservation commands can be used in vm.
        :param vm: The vm to be checked.
        :param blk_dev: The block device in vm to be checked.
        """
        session = vm.wait_for_login()
        cmd = ("sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0}"
               .format(blk_dev))
        cmd_status, cmd_output = session.cmd_status_output(cmd)
        session.close()
        if cmd_status == 127:
            test.error("sg3_utils not installed in test image")
        elif cmd_status != 0:
            test.fail("persistent reservation failed for /dev/%s" % blk_dev)
        else:
            logging.info("persistent reservation successful for /dev/%s" % blk_dev)

    def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"):
        """
        Start or stop qemu-pr-helper daemon
        :param is_start: Set True to start, False to stop.
        """
        service_mgr = service.ServiceManager()
        if is_start:
            service_mgr.start('qemu-pr-helper')
            time.sleep(2)
            shutil.chown(path_to_sock, "qemu", "qemu")
        else:
            service_mgr.stop('qemu-pr-helper')

    def ppc_controller_update():
        """
        Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type

        :return:
        """
        if params.get('machine_type') == 'pseries' and device_bus == 'scsi':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()

    # Check if SCSI3 Persistent Reservations supported by
    # current libvirt versions.
    if not libvirt_version.version_compare(4, 4, 0):
        test.cancel("The <reservations> tag supported by libvirt from version "
                    "4.4.0")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes
    device = params.get("virt_disk_device", "lun")
    device_target = params.get("virt_disk_device_target", "sdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "block")
    device_bus = params.get("virt_disk_device_bus", "scsi")
    # Iscsi options
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1G")
    auth_uuid = "yes" == params.get("auth_uuid")
    auth_usage = "yes" == params.get("auth_usage")
    # SCSI3 PR options
    reservations_managed = "yes" == params.get("reservations_managed", "yes")
    reservations_source_type = params.get("reservations_source_type", "unix")
    reservations_source_path = params.get("reservations_source_path",
                                          "/var/run/qemu-pr-helper.sock")
    reservations_source_mode = params.get("reservations_source_mode", "client")
    secret_uuid = ""
    # Case step options
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")

    # Start vm and get all partitions in vm
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_in_source = "yes" == params.get("auth_in_source", "no")
            if auth_in_source and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in "
                            "current libvirt version.")
            auth_type = params.get("auth_type", "chap")
            secret_usage_target = params.get("secret_usage_target",
                                             "libvirtiscsi")
            secret_usage_type = params.get("secret_usage_type", "iscsi")
            chap_user = params.get("iscsi_user", "redhat")
            chap_passwd = params.get("iscsi_password", "redhat")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(str(chap_passwd).encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                 is_login=True,
                                                 image_size=emulated_size,
                                                 chap_user=chap_user,
                                                 chap_passwd=chap_passwd,
                                                 portal_ip=iscsi_host)

        # Add disk xml
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": secret_uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"dev": blk_dev}})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if auth_in_source:
                disk_source.auth = disk_auth
            else:
                disk_xml.auth = disk_auth
        if reservations_managed:
            reservations_dict = {"reservations_managed": "yes"}
        else:
            start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path)
            reservations_dict = {"reservations_managed": "no",
                                 "reservations_source_type": reservations_source_type,
                                 "reservations_source_path": reservations_source_path,
                                 "reservations_source_mode": reservations_source_mode}
        disk_source.reservations = disk_xml.new_reservations(**reservations_dict)
        disk_xml.source = disk_source

        # Update controller of ppc vms
        ppc_controller_update()

        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            # Start the VM and check status
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
            time.sleep(5)
            if hotplug_disk:
                result = virsh.attach_device(vm_name, disk_xml.xml,
                                             ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            new_parts = get_delta_parts(vm, old_parts)
            if len(new_parts) != 1:
                logging.error("Expected 1 dev added but has %s" % len(new_parts))
            new_part = new_parts[0]
            check_pr_cmds(vm, new_part)
            result = virsh.detach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True, wait_remove_event=True)
            libvirt.check_exit_status(result)
        except virt_vm.VMStartError as e:
            test.fail("VM failed to start."
                      "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            test.fail("Failed to define VM:\n%s" % xml_error)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")
        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
        # Stop qemu-pr-helper daemon
        start_or_stop_qemu_pr_helper(is_start=False)
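
# --- Illustrative sketch (hedged): the essence of check_pr_cmds() above,
# reduced to a single register/read cycle with sg_persist. The 123aaa key
# comes from the test; the helper name and the guest_session parameter (an
# aexpect-style session) are hypothetical.
def _minimal_pr_check(guest_session, blk_dev):
    cmd = ("sg_persist --no-inquiry -v --out --register-ignore "
           "--param-sark 123aaa /dev/{0} && "
           "sg_persist --no-inquiry --in -k /dev/{0}".format(blk_dev))
    # Status 0 means the device accepted the registration and reported keys
    status, output = guest_session.cmd_status_output(cmd)
    return status == 0, output
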
def run(test, params, env):
    """
    Test command: secret-dumpxml <secret>

    Output attributes of a secret as an XML dump to stdout.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    status_error = ("yes" == params.get("status_error", "no"))
    secret_ref = params.get("secret_ref")

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = commands.getstatusoutput(cmd)
        if status:
            raise error.TestNAError("Failed to generate valid uuid")

    # Get a full path of tmpfile, the tmpfile need not exist
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml = """
<secret ephemeral='no' private='yes'>
  <uuid>%s</uuid>
  <usage type='volume'>
    <volume>%s</volume>
  </usage>
</secret>
""" % (uuid, volume_path)

    # Write secret xml into a tmpfile
    tmp_file = tempfile.NamedTemporaryFile(prefix=("secret_xml_"), dir=tmp_dir)
    xmlfile = tmp_file.name
    tmp_file.close()

    fd = open(xmlfile, 'w')
    fd.write(secret_xml)
    fd.close()

    try:
        virsh.secret_define(xmlfile, debug=True)

        cmd_result = virsh.secret_dumpxml(uuid, **virsh_dargs)
        output = cmd_result.stdout.strip()
        if not status_error and cmd_result.exit_status:
            raise error.TestFail("Dumping the xml of secret object failed")

        match_string = "<uuid>%s</uuid>" % uuid
        if not re.search(match_string, output):
            raise error.TestFail("The secret xml is not valid")
    finally:
        #Cleanup
        virsh.secret_undefine(uuid, debug=True)

        if os.path.exists(xmlfile):
            os.remove(xmlfile)
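# --- Illustrative sketch (hedged): instead of the regex match in the test
# above, the dumped secret XML could be validated structurally with the
# standard library. The helper name is hypothetical; dumpxml_output is the
# text printed by `virsh secret-dumpxml`.
def _uuid_from_secret_xml(dumpxml_output):
    import xml.etree.ElementTree as ElementTree

    root = ElementTree.fromstring(dumpxml_output)
    # The dump's root element is <secret>; <uuid> is a direct child
    return root.findtext("uuid")
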
def run(test, params, env):
    """
    Test virsh vol-create and vol-create-as command to cover the following matrix:
    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]

    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]

    pool_type = [logical]
    volume_format = [none]

    pool_type = [iscsi, scsi]
    Not supported with format type

    TODO:
    pool_type = [rbd, glusterfs]

    Reference: http://www.libvirt.org/storage.html
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_pool_format = params.get("src_pool_format", "")
    pool_vol_num = int(params.get("src_pool_vol_num", '1'))
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    prefix_vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format", "raw")
    vol_capacity = params.get("vol_capacity", 1048576)
    vol_allocation = params.get("vol_allocation", 1048576)
    image_size = params.get("emulate_image_size", "1G")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    by_xml = "yes" == params.get("create_vol_by_xml", "yes")
    incomplete_target = "yes" == params.get("incomplete_target", "no")
    luks_encrypted = "luks" == params.get("encryption_method")
    encryption_secret_type = params.get("encryption_secret_type", "passphrase")
    virsh_readonly_mode = 'yes' == params.get("virsh_readonly", "no")
    vm_name = params.get("main_vm")

    if vm_name:
        vm = env.get_vm(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        params["orig_config_xml"] = vmxml.copy()

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")
        if incomplete_target:
            test.cancel("It does not support generate target path"
                        "in current libvirt version.")

    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        test.cancel("pool type %s not in supported type list: %s" %
                    (src_pool_type, pool_type))

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Set volume xml attribute dictionary, extracting all params starting with
    # 'vol_' which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    def attach_disk_encryption(vol_path, uuid, params):
        """
        Attach a disk with luks encryption

        :param vol_path: the volume path used in disk XML
        :param uuid: the secret uuid of the volume
        :param params: the parameter dictionary
        :raise: test.fail when disk cannot be attached

        """
        target_dev = params.get("target_dev", "vdb")
        vol_format = params.get("vol_format", "qcow2")
        disk_path = vol_path
        new_disk_dict = {}
        new_disk_dict.update({
            "driver_type": vol_format,
            "source_encryption_dict": {
                "encryption": 'luks',
                "secret": {
                    "type": "passphrase",
                    "uuid": uuid
                }
            }
        })
        result = utlv.attach_additional_device(vm_name, target_dev, disk_path,
                                               new_disk_dict)
        if result.exit_status:
            raise test.fail("Attach device %s failed." % target_dev)

    def check_vm_start():
        """
        Start a guest

        :params: the parameter dictionary
        :raise: test.fail when VM cannot be started
        """
        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

    def create_luks_secret(vol_path):
        """
        Create secret for luks encryption
        :param vol_path: volume path.
        :return: secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        utlv.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as detail:
            test.error("Failed to get newly created secret uuid: %s" % detail)
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            'redhat'.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string)
        utlv.check_exit_status(ret)

        return encryption_uuid

    def post_process_vol(ori_vol_path):
        """
        Create or deactivate a volume without libvirt

        :param ori_vol_path: Full path of an original volume
        :return: Volume name for checking
        """
        process_vol_name = params.get("process_vol_name", "process_vol")
        process_vol_options = params.get("process_vol_options", "")
        process_vol_capacity = params.get("process_vol_capacity", vol_capacity)
        process_vol_cmd = ""
        unsupport_err = "Unsupport do '%s %s' in this test" % (
            process_vol_by, process_vol_type)
        if process_vol_by == "lvcreate":
            process_vol_cmd = "lvcreate -L %s " % process_vol_capacity
            if process_vol_type == "thin":
                if not process_vol_options:
                    process_vol_options = "-T "
                process_vol_cmd += "%s " % process_vol_options
                processthin_pool_name = params.get("processthin_pool_name",
                                                   "thinpool")
                processthin_vol_name = params.get("processthin_vol_name",
                                                  "thinvol")
                process_vol_capacity = params.get("process_vol_capacity", "1G")
                process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path),
                                               processthin_pool_name)
                process_vol_cmd += "-V %s " % process_vol_capacity
                process_vol_cmd += "-n %s " % processthin_vol_name
                process_vol_name = processthin_vol_name
            elif process_vol_type == "snapshot":
                if not process_vol_options:
                    process_vol_options = "-s "
                process_vol_cmd += "%s " % process_vol_options
                process_vol_cmd += "-n %s " % process_vol_name
                process_vol_cmd += "%s " % (ori_vol_path)
            else:
                logging.error(unsupport_err)
                return
        elif process_vol_by == "qemu-img" and process_vol_type == "create":
            process_vol_cmd = "qemu-img create "
            process_vol_path = os.path.dirname(ori_vol_path) + "/"
            process_vol_path += process_vol_name
            process_vol_cmd += "%s " % process_vol_options
            process_vol_cmd += "%s " % process_vol_path
            process_vol_cmd += "%s " % process_vol_capacity
        elif process_vol_by == "lvchange" and process_vol_type == "deactivate":
            process_vol_cmd = "lvchange %s " % ori_vol_path
            if not process_vol_options:
                process_vol_options = "-an"
            process_vol_cmd += process_vol_options
        else:
            logging.error(unsupport_err)
            return
        rst = process.run(process_vol_cmd, ignore_status=True, shell=True)
        if rst.exit_status:
            if "Snapshots of snapshots are not supported" in rst.stderr_text:
                logging.debug("%s is already a snapshot volume", ori_vol_path)
                process_vol_name = os.path.basename(ori_vol_path)
            else:
                logging.error(rst.stderr_text)
                return
        return process_vol_name

    def check_vol(pool_name, vol_name, expect_exist=True):
        """
        Check volume vol_name in pool pool_name
        """
        src_volumes = src_pv.list_volumes().keys()
        logging.debug("Current volumes in %s: %s", pool_name, src_volumes)
        if expect_exist:
            if vol_name not in src_volumes:
                test.fail("Can't find volume %s in pool %s" %
                          (vol_name, pool_name))
            # check format in volume xml
            volxml = libvirt_xml.VolXML()
            post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name)
            logging.debug("Volume %s XML: %s" %
                          (vol_name, post_xml.xmltreefile))
            if 'format' in post_xml.keys() and vol_format is not None:
                if post_xml.format != vol_format:
                    test.fail("Volume format %s is not expected" % vol_format +
                              " as defined.")
        else:
            if vol_name in src_volumes:
                test.fail("Find volume %s in pool %s, but expect not" %
                          (vol_name, pool_name))

    fmt_err0 = "Unknown file format '%s'" % vol_format
    fmt_err1 = "Formatting or formatting option not "
    fmt_err1 += "supported for file format '%s'" % vol_format
    fmt_err2 = "Driver '%s' does not support " % vol_format
    fmt_err2 += "image creation"
    fmt_err_list = [fmt_err0, fmt_err1, fmt_err2]
    skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format
    vol_path_list = []
    secret_uuids = []
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name,
                     src_pool_type,
                     src_pool_target,
                     src_emulated_image,
                     image_size=image_size,
                     source_format=src_pool_format)

        src_pv = libvirt_storage.PoolVolume(src_pool_name)
        src_pool_uuid = libvirt_storage.StoragePool().pool_info(
            src_pool_name)['UUID']
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())
        # Create volumes by virsh in a loop
        while pool_vol_num > 0:
            # Set volume xml file
            vol_name = prefix_vol_name + "_%s" % pool_vol_num
            bad_vol_name = params.get("bad_vol_name", "")
            if bad_vol_name:
                vol_name = bad_vol_name
            pool_vol_num -= 1
            # disk partition for new volume
            if src_pool_type == "disk":
                vol_name = utlv.new_disk_vol_name(src_pool_name)
                if vol_name is None:
                    test.error("Fail to generate volume name")
            if by_xml:
                # According to BZ#1138523, we need to inspect the right name
                # (disk partition) for the new volume
                if src_pool_type == "disk":
                    vol_name = utlv.new_disk_vol_name(src_pool_name)
                    if vol_name is None:
                        test.error("Fail to generate volume name")
                vol_arg['name'] = vol_name
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                if luks_encrypted:
                    if vol_format == "qcow2" and not libvirt_version.version_compare(
                            6, 10, 0):
                        test.cancel("Qcow2 format with luks encryption"
                                    " is not supported in current libvirt")
                    # For luks encrypted disk, add related xml in newvol
                    luks_encryption_params = {}
                    luks_encryption_params.update({"format": "luks"})
                    luks_secret_uuid = create_luks_secret(
                        os.path.join(src_pool_target, vol_name))
                    secret_uuids.append(luks_secret_uuid)
                    luks_encryption_params.update({
                        "secret": {
                            "type": encryption_secret_type,
                            "uuid": luks_secret_uuid
                        }
                    })
                    newvol.encryption = volxml.new_encryption(
                        **luks_encryption_params)
                vol_xml = newvol['xml']
                if params.get('setup_libvirt_polkit') == 'yes':
                    process.run("chmod 666 %s" % vol_xml,
                                ignore_status=True,
                                shell=True)
                    if luks_encrypted and libvirt_version.version_compare(
                            4, 5, 0):
                        try:
                            polkit = test_setup.LibvirtPolkitConfig(params)
                            polkit_rules_path = polkit.polkit_rules_path
                            with open(polkit_rules_path, 'r+') as f:
                                rule = f.readlines()
                                for index, v in enumerate(rule):
                                    if v.find("secret") >= 0:
                                        nextline = rule[index + 1]
                                        s = nextline.replace(
                                            "QEMU", "secret").replace(
                                                "pool_name",
                                                "secret_uuid").replace(
                                                    "virt-dir-pool",
                                                    "%s" % luks_secret_uuid)
                                        rule[index + 1] = s
                                rule = ''.join(rule)
                            with open(polkit_rules_path, 'w+') as f:
                                f.write(rule)
                            logging.debug(rule)
                            polkit.polkitd.restart()
                        except IOError as e:
                            logging.error(e)
                # Run virsh_vol_create to create vol
                logging.debug("Create volume from XML: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(
                    src_pool_name,
                    vol_xml,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    ignore_status=True,
                    debug=True)

                if luks_encrypted and vol_format == "qcow2":
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    uuid = luks_secret_uuid
                    attach_disk_encryption(vol_path, uuid, params)
                    check_vm_start()

            else:
                # Run virsh_vol_create_as to create_vol
                pool_name = src_pool_name
                if params.get("create_vol_by_pool_uuid") == "yes":
                    pool_name = src_pool_uuid
                cmd_result = virsh.vol_create_as(
                    vol_name,
                    pool_name,
                    vol_capacity,
                    vol_allocation,
                    vol_format,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    readonly=virsh_readonly_mode,
                    ignore_status=True,
                    debug=True)
            # Check result
            try:
                utlv.check_exit_status(cmd_result, status_error)
                check_vol(src_pool_name, vol_name, not status_error)
                if bad_vol_name:
                    pattern = "volume name '%s' cannot contain '/'" % vol_name
                    logging.debug("pattern: %s", pattern)
                    if "\\" in pattern and by_xml:
                        pattern = pattern.replace("\\", "\\\\")
                    if re.search(pattern, cmd_result.stderr) is None:
                        test.fail("vol-create failed with unexpected reason")
                if not status_error:
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    logging.debug("Full path of %s: %s", vol_name, vol_path)
                    vol_path_list.append(vol_path)
            except exceptions.TestFail as detail:
                stderr = cmd_result.stderr
                if any(err in stderr for err in fmt_err_list):
                    test.cancel(skip_msg)
                else:
                    test.fail("Create volume fail:\n%s" % detail)
        # Post process vol by other programs
        process_vol_by = params.get("process_vol_by")
        process_vol_type = params.get("process_vol_type", "")
        expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes")
        if process_vol_by and vol_path_list:
            process_vol = post_process_vol(vol_path_list[0])
            if process_vol is not None:
                try:
                    virsh.pool_refresh(src_pool_name, ignore_status=False)
                    check_vol(src_pool_name, process_vol, expect_vol_exist)
                except (process.CmdError, exceptions.TestFail) as detail:
                    if process_vol_type == "thin":
                        logging.error(str(detail))
                        test.cancel("You may encounter bug BZ#1060287")
                    else:
                        test.fail("Fail to refresh pool:\n%s" % detail)
            else:
                test.fail("Post process volume failed")
    finally:
        # Cleanup
        # For old lvm2 versions (2.02.106 or earlier), deactivating a volume
        # group (destroying a libvirt logical pool) fails when it holds a
        # deactivated lv snapshot, so activate it manually before destroying
        # the pool
        if vm_name:
            virsh.destroy(vm_name, debug=True, ignore_status=True)
        if params.get("orig_config_xml"):
            params.get("orig_config_xml").sync()
        if src_pool_type == 'logical' and vol_path_list:
            vg_name = vol_path_list[0].split('/')[2]
            process.run("lvchange -ay %s" % vg_name, shell=True)
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
Exemplo n.º 40
def run(test, params, env):
    '''
    Test the command virsh pool-create-as

    (1) Prepare backend storage device
    (2) Define secret xml and set secret value
    (3) Test pool-create-as or pool-define-as with authentication
    '''

    pool_options = params.get('pool_options', '')
    pool_name = params.get('pool_name')
    pool_type = params.get('pool_type')
    pool_target = params.get('pool_target', '')
    status_error = params.get('status_error') == "yes"

    # iscsi options
    emulated_size = params.get("iscsi_image_size", "1")
    iscsi_host = params.get("iscsi_host", "127.0.0.1")
    chap_user = params.get("iscsi_user")
    chap_passwd = params.get("iscsi_password")

    # ceph options
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = "--keyring %s" % key_file

    # auth options
    auth_usage = (params.get('auth_usage') == 'yes')
    auth_uuid = (params.get('auth_uuid') == 'yes')
    sec_ephemeral = params.get("secret_ephemeral", "no")
    sec_private = params.get("secret_private", "yes")
    sec_desc = params.get("secret_description")
    auth_type = params.get("auth_type")
    sec_usage = params.get("secret_usage_type")
    sec_target = params.get("secret_usage_target")
    sec_name = params.get("secret_name")
    auth_sec_dict = {
        "sec_ephemeral": sec_ephemeral,
        "sec_private": sec_private,
        "sec_desc": sec_desc,
        "sec_usage": sec_usage,
        "sec_target": sec_target,
        "sec_name": sec_name
    }

    if sec_usage == "iscsi":
        auth_username = chap_user
        sec_password = chap_passwd
        secret_usage = sec_target

    if sec_usage == "ceph":
        auth_username = ceph_auth_user
        sec_password = ceph_auth_key
        secret_usage = sec_name

    if pool_target and not os.path.isdir(pool_target):
        if os.path.isfile(pool_target):
            logging.error('<target> must be a directory')
        else:
            os.makedirs(pool_target)

    def setup_ceph_auth():
        disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
        disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))

        if not utils_package.package_install(["ceph-common"]):
            test.error("Failed to install ceph-common")

        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key))

        # Delete the disk if it exists
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Create a local image and make a filesystem on it.
        img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
        disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}".
                    format(img_file))
        process.run(disk_cmd, ignore_status=False, shell=True)

        # Convert the image to remote storage
        # Ceph only supports the raw format
        disk_cmd = ("qemu-img convert -O %s %s %s" %
                    ("raw", img_file, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)

    def setup_iscsi_auth():
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd)
        return iscsi_target

    def check_auth_in_xml(dparams):
        sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source()
        with open(sourcexml.xml) as xml_f:
            logging.debug("Source XML is: \n%s", xml_f.read())

        # Check result
        try:
            for name, v_expect in dparams.items():
                if v_expect != sourcexml[name]:
                    test.fail("Expect to find %s=%s, but got %s=%s" %
                              (name, v_expect, name, sourcexml[name]))
        except xcepts.LibvirtXMLNotFoundError as details:
            if "usage not found" in str(details) and auth_uuid:
                pass  # Not an auth_usage test
            elif "uuid not found" in str(details) and auth_usage:
                pass  # Not a auth_uuid test
            else:
                test.fail(details)

    def check_result(result, expect_error=False):
        # pool-create-as/pool-define-as return a CmdResult
        if isinstance(result, process.CmdResult):
            result = (result.exit_status == 0)  # True means run success

        if expect_error:
            if result:
                test.fail("Expect to fail but run success")
            logging.info("It's an expected error")
        elif not result:
            test.fail("Expect to succeed but run failure")

    if not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Pool create/define with authentication is not"
                    " supported in this libvirt version")

    sec_uuid = ""
    img_file = ""
    # Keep ceph_cfg empty so cleanup removes the ceph config file only if
    # this test created it
    ceph_cfg = ""
    libvirt_pool = libvirt_storage.StoragePool()
    try:
        # Create secret xml and set value
        encode = True
        if sec_usage == "ceph":
            encode = False  # Ceph key already encoded
        sec_uuid = libvirt.create_secret(auth_sec_dict)
        virsh.secret_set_value(sec_uuid,
                               sec_password,
                               encode=encode,
                               debug=True)
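        # The two steps above are roughly equivalent to (uuid illustrative):
        #   virsh secret-define --file <secret.xml>
        #   virsh secret-set-value <uuid> <base64-value>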

        if sec_usage == "iscsi":
            iscsi_dev = setup_iscsi_auth()
            pool_options += (" --source-host %s --source-dev %s"
                             " --auth-type %s --auth-username %s" %
                             (iscsi_host, iscsi_dev, auth_type, auth_username))

        if sec_usage == "ceph":
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            setup_ceph_auth()
            rbd_pool = ceph_disk_name.split('/')[0]
            pool_options += (
                " --source-host %s --source-name %s"
                " --auth-type %s --auth-username %s" %
                (ceph_host_ip, rbd_pool, auth_type, auth_username))

        if auth_usage:
            pool_options += " --secret-usage %s" % secret_usage

        if auth_uuid:
            pool_options += " --secret-uuid %s" % sec_uuid

        # Run test cases
        func_name = params.get("test_func", "pool_create_as")
        logging.info('Perform test runner: %s', func_name)
        if func_name == "pool_create_as":
            func = virsh.pool_create_as
        if func_name == "pool_define_as":
            func = virsh.pool_define_as
        result = func(pool_name,
                      pool_type,
                      pool_target,
                      extra=pool_options,
                      debug=True)

        # Check status_error
        check_result(result, expect_error=status_error)
        if not status_error:
            # Check pool status
            pool_status = libvirt_pool.pool_state(pool_name)
            if ((pool_status == 'inactive' and func_name == "pool_define_as")
                    or
                (pool_status == "active" and func_name == "pool_create_as")):
                logging.info("Expected pool status:%s" % pool_status)
            else:
                test.fail("Not an expected pool status: %s" % pool_status)
            # Check pool dumpxml
            dict_expect = {
                "auth_type": auth_type,
                "auth_username": auth_username,
                "secret_usage": secret_usage,
                "secret_uuid": sec_uuid
            }
            check_auth_in_xml(dict_expect)
    finally:
        # Clean up
        logging.info("Start to cleanup")
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        if os.path.exists(img_file):
            os.remove(img_file)
        virsh.secret_undefine(sec_uuid, ignore_status=True)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if libvirt_pool.pool_exists(pool_name):
            libvirt_pool.delete_pool(pool_name)
Exemplo n.º 41
def run(test, params, env):
    """
    Test command: virsh secret-list

    Returns a list of secrets
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    status_error = ("yes" == params.get("status_error", "no"))
    secret_list_option = params.get("secret_list_option", "")

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    uuid_list = []
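    # Define four secrets covering each (ephemeral, private) combination:
    # index 0 = (yes, yes), 1 = (yes, no), 2 = (no, yes), 3 = (no, no);
    # the m1..m4 checks below rely on this ordering.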
    for i in ['yes', 'no']:
        for j in ['yes', 'no']:
            # Generate valid uuid
            cmd = "uuidgen"
            status, uuid = process.getstatusoutput(cmd)
            if status:
                test.cancel("Failed to generate valid uuid")
            uuid_list.append(uuid)

            # Get the full path of a tmpfile; it need not exist
            tmp_dir = data_dir.get_tmp_dir()
            volume_path = os.path.join(tmp_dir, "secret_volume_%s_%s" % (i, j))

            secret_xml_obj = SecretXML(ephemeral=i, private=j)
            secret_xml_obj.uuid = uuid
            secret_xml_obj.volume = volume_path
            secret_xml_obj.usage = "volume"
            secret_xml_obj.description = "test"

            virsh.secret_define(secret_xml_obj.xml, debug=True)

    try:
        cmd_result = virsh.secret_list(secret_list_option, **virsh_dargs)
        output = cmd_result.stdout.strip()
        exit_status = cmd_result.exit_status
        if not status_error and exit_status != 0:
            test.fail("Run failed with right command")
        if status_error and exit_status == 0:
            test.fail("Run successfully with wrong command!")

        # Return if secret-list failed
        if exit_status != 0:
            return

        # Check the result
        m1 = re.search(uuid_list[0], output)
        m2 = re.search(uuid_list[1], output)
        m3 = re.search(uuid_list[2], output)
        m4 = re.search(uuid_list[3], output)

        if secret_list_option.find("--no-ephemeral") >= 0:
            if m1 or m2:
                test.fail("Secret object %s, %s shouldn't be listed"
                          " out" % (uuid_list[0], uuid_list[1]))
            if secret_list_option.find("--private") >= 0:
                if not m3:
                    test.fail("Failed list secret object %s" %
                              uuid_list[2])
                if m4:
                    test.fail("Secret object %s shouldn't be listed"
                              " out" % uuid_list[3])
            elif secret_list_option.find("--no-private") >= 0:
                if not m4:
                    test.fail("Failed list secret object %s" %
                              uuid_list[3])
                if m3:
                    test.fail("Secret object %s shouldn't be listed"
                              " out" % uuid_list[2])
            else:
                if not m3 or not m4:
                    test.fail("Failed list secret object %s, %s" %
                              (uuid_list[2], uuid_list[3]))
        elif secret_list_option.find("--ephemeral") >= 0:
            if m3 or m4:
                test.fail("Secret object %s, %s shouldn't be listed"
                          " out" % (uuid_list[2], uuid_list[3]))
            if secret_list_option.find("--private") >= 0:
                if not m1:
                    test.fail("Failed list secret object %s" %
                              uuid_list[0])
                if m2:
                    test.fail("Secret object %s shouldn't be listed"
                              " out" % uuid_list[1])
            elif secret_list_option.find("--no-private") >= 0:
                if not m2:
                    test.fail("Failed list secret object %s" %
                              uuid_list[1])
                if m1:
                    test.fail("Secret object %s shouldn't be listed"
                              " out" % uuid_list[0])
            else:
                if not m1 or not m2:
                    test.fail("Failed list secret object %s, %s" %
                              (uuid_list[0], uuid_list[1]))
        elif secret_list_option.find("--private") >= 0:
            if not m1 or not m3:
                test.fail("Failed list secret object %s, %s" %
                          (uuid_list[0], uuid_list[2]))
            if m2 or m4:
                test.fail("Secret object %s and %s should't be "
                          "listed out"
                          % (uuid_list[1], uuid_list[3]))
        elif secret_list_option.find("--no-private") >= 0:
            if not m2 or not m4:
                test.fail("Failed list secret object %s, %s" %
                          (uuid_list[1], uuid_list[3]))
            if m1 or m3:
                test.fail("Secret object %s and %s shouldn't be "
                          "listed out" %
                          (uuid_list[0], uuid_list[2]))
        elif not secret_list_option:
            if not m1 or not m2 or not m3 or not m4:
                test.fail("Fail to list all secret objects: %s" %
                          uuid_list)

    finally:
        # Cleanup
        for uuid in uuid_list:
            virsh.secret_undefine(uuid, debug=True)
Exemplo n.º 42
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target and, for volume disks, an iscsi pool
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
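            # "virsh secret-define" typically prints "Secret <uuid> created",
            # so the second whitespace-separated field is the uuid.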
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
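            # virsh secret-set-value expects a base64-encoded value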
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
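            # The pool XML resembles (values illustrative):
            # <pool type='iscsi'>
            #   <name>iscsi_pool</name>
            #   <source>
            #     <host name='127.0.0.1'/>
            #     <device path='iqn.2019-01.com.example:t1'/>
            #   </source>
            #   <target>
            #     <path>/dev/disk/by-path</path>
            #   </target>
            # </pool>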
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
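        # For the network disk case the generated XML resembles
        # (values illustrative):
        # <disk type='network' device='disk'>
        #   <source protocol='iscsi' name='iqn.../1'>
        #     <host name='127.0.0.1' port='3260'/>
        #   </source>
        #   <auth username='redhat'>
        #     <secret type='iscsi' usage='libvirtiscsi'/>
        #   </auth>
        #   <target dev='vdb' bus='virtio'/>
        # </disk>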
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Exemplo n.º 43
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target and, for volume disks, an iscsi pool
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd.encode()).decode()
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Exemplo n.º 44
def prepare_ceph_disk(ceph_params, remote_virsh_dargs, test, runner_on_target):
    """
    Prepare one image on a remote ceph server with auth enabled or disabled,
    and expose it to the VM via network access

    :param ceph_params: parameters to set up ceph.
    :param remote_virsh_dargs: parameters for remote virsh.
    :param test: test itself.
    :param runner_on_target: remote command runner bound to the target host.
    """
    # Ceph server config parameters
    virsh_dargs = {'debug': True, 'ignore_status': True}
    prompt = ceph_params.get("prompt", r"[\#\$]\s*$")
    ceph_disk = "yes" == ceph_params.get("ceph_disk")
    mon_host = ceph_params.get('mon_host')
    client_name = ceph_params.get('client_name')
    client_key = ceph_params.get("client_key")
    vol_name = ceph_params.get("vol_name")
    disk_img = ceph_params.get("disk_img")
    key_file = ceph_params.get("key_file")
    disk_format = ceph_params.get("disk_format")
    key_opt = ""

    # Auth and secret config parameters.
    auth_user = ceph_params.get("auth_user")
    auth_key = ceph_params.get("auth_key")
    auth_type = ceph_params.get("auth_type")
    auth_usage = ceph_params.get("secret_usage")
    secret_uuid = ceph_params.get("secret_uuid")

    # Remote host parameters.
    remote_ip = ceph_params.get("server_ip")
    remote_user = ceph_params.get("server_user", "root")
    remote_pwd = ceph_params.get("server_pwd")

    # Clean up dirty secrets in the test environment if there are any.
    dirty_secret_list = get_secret_list()
    if dirty_secret_list:
        for dirty_secret_uuid in dirty_secret_list:
            virsh.secret_undefine(dirty_secret_uuid)

    # Install ceph-common package which include rbd command
    if utils_package.package_install(["ceph-common"]):
        if client_name and client_key:
            # Clean up dirty secrets on remote host.
            try:
                remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                remote_dirty_secret_list = get_secret_list(remote_virsh)
                for dirty_secret_uuid in remote_dirty_secret_list:
                    remote_virsh.secret_undefine(dirty_secret_uuid)
            except (process.CmdError, remote.SCPError) as detail:
                raise exceptions.TestError(detail)
            finally:
                logging.debug('clean up secret on remote host')
                remote_virsh.close_session()

            with open(key_file, 'w') as f:
                f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
            key_opt = "--keyring %s" % key_file

            # Create secret xml
            sec_xml = secret_xml.SecretXML("no", "no")
            sec_xml.usage = auth_type
            sec_xml.usage_name = auth_usage
            sec_xml.uuid = secret_uuid
            sec_xml.xmltreefile.write()
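            # The secret XML resembles (values illustrative):
            # <secret ephemeral='no' private='no'>
            #   <uuid>...</uuid>
            #   <usage type='ceph'>
            #     <name>client.admin secret</name>
            #   </usage>
            # </secret>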

            logging.debug("Secret xml: %s", sec_xml)
            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

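            # Parse the uuid out of the "Secret <uuid> created" message
            # printed by secret-define.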
            uuid_match = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                    ret.stdout.strip())
            if not uuid_match:
                test.fail("Failed to get secret uuid")
            secret_uuid = uuid_match[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)

            # Set secret value
            ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs)
            libvirt.check_exit_status(ret)

            # Create secret on remote host.
            local_path = sec_xml.xml
            remote_path = '/var/lib/libvirt/images/new_secret.xml'
            remote_folder = '/var/lib/libvirt/images'
            cmd = 'mkdir -p %s && chmod 777 %s && touch %s' % (
                remote_folder, remote_folder, remote_path)
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())

            if status:
                test.fail("Failed to run '%s' on the remote: %s" %
                          (cmd, output))
            remote.scp_to_remote(remote_ip,
                                 '22',
                                 remote_user,
                                 remote_pwd,
                                 local_path,
                                 remote_path,
                                 limit="",
                                 log_filename=None,
                                 timeout=600,
                                 interface=None)
            cmd = "/usr/bin/virsh secret-define --file %s" % remote_path
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.fail("Failed to run '%s' on the remote: %s" %
                          (cmd, output))

            # Set secret value on remote host.
            cmd = "/usr/bin/virsh secret-set-value --secret %s --base64 %s" % (
                secret_uuid, auth_key)
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())

            if status:
                test.fail("Failed to run '%s' on the remote: %s" %
                          (cmd, output))

        # Delete the disk if it exists
        disk_src_name = "%s/%s" % (vol_name, disk_img)
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(mon_host, key_opt, disk_src_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Convert the disk format
        first_disk_device = ceph_params.get('first_disk')
        blk_source = first_disk_device['source']
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
                    " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                      disk_format, blk_source, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)
        return (key_opt, secret_uuid)
Exemplo n.º 45
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]
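        # virsh vol-list prints a two-line header before the entries, so the
        # first volume name sits on the third line, e.g. (hypothetical output):
        #   Name                 Path
        #   ------------------------------------------------------------
        #   unit:0:0:1           /dev/disk/by-path/ip-...-lun-1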

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options; default to no options if pool_type is unset
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to file
            write_file(file_path)

            # Set length for calculating offset + length in the
            # get_pre_post_digest() and digest() calls below
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get digests of the region before offset (pre) and the
                region after offset + length (post)
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)
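
            # For reference, a minimal sketch of what the digest() helper used
            # above is assumed to do: hash a byte region of a file, where a
            # length of 0 is taken to mean "read to EOF" (an assumption
            # inferred from the digest(path, offset, 0) calls). The test keeps
            # calling the real digest(); this sketch is never invoked.
            def _region_md5_sketch(path, offset, length):
                import hashlib
                md5 = hashlib.md5()
                with open(path, 'rb') as reader:
                    reader.seek(offset)
                    data = reader.read(length) if length else reader.read()
                md5.update(data)
                return md5.hexdigest()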

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path,
                            ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name,
                                      file_path,
                                      options,
                                      unprivileged_user=unpri_user,
                                      uri=uri,
                                      debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path,
                            ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name,
                                        file_path,
                                        options,
                                        unprivileged_user=unpri_user,
                                        uri=uri,
                                        debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            test.fail("Fail to %s volume: %s" % (operation, result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            test.fail("file digests do not match, volume %s failed" %
                      operation)

    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
Example No. 46
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # Generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port,
                               uri_path)
    test_dict["desuri"] = uri

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Set up SSH key
    #ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22)
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-direction migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Get initial SELinux enforcing status on both ends
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforce: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce: %s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode when testing "
                         "ceph migration")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.error("Failed to set remote SELinux in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name,
                           disk_format,
                           host_ip,
                           disk_src_protocol,
                           vol_name,
                           disk_image,
                           auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                logging.info("Failed to start VM")
                test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure VM can be cleaned up on remote host even migrating fail.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on the remote host if the test involves ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R  %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R  %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
Example No. 47
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(),
                                       "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
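        # The file written above holds a single line, e.g. (hypothetical
        # monitors): mon_host = 10.0.0.1:6789,10.0.0.2:6789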

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partitions in VM before the disk was attached.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Read-only fs: check the error messages. The command may
            # succeed, but read-only messages should be found in the
            # command output.
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1": "s1 --disk-only --no-metadata",
            "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
                  (snapshot4_file, snapshot4_disk_file),
            "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
                  (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(
            secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Collect the UUID column if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")

    # Track the ceph config file path so it can be removed at the end of the test
    ceph_cfg = ""
    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # Since libvirt 3.9.0, the auth element can be placed inside the source element.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel(
            "place auth in source is not supported in current libvirt version")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up dirty secrets in the test environment if there are any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("fail to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("fail to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
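                # Note: virsh secret-define prints a line like
                # "Secret b4e8f6d3-100c-4e71-9f91-069f89742273 created";
                # the regex above pulls the UUID field out of that line.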
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a FS on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # Pass a different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # No need auth options for volume
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            # Since libvirt 3.9.0, the auth element can be placed inside the source element.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()
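            # Comparing partition counts is a coarse check: it assumes no
            # other disk changes happened in the guest during the test.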

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        # test.cancel() raises, so reaching this point means none of the
        # known unsupported-feature messages matched.
        test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
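            # The leading 'rbd info' in each chained command acts as an
            # existence check, so 'snap purge' and 'rm' only run when the
            # image is actually present.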
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
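
# A minimal sketch of the get_secret_list() helper referenced in the cleanup
# above, assuming it parses `virsh secret-list` output; the real helper may
# live in a shared utility module with a different implementation.
def get_secret_list():
    """Return the UUIDs of all currently defined libvirt secrets."""
    output = virsh.secret_list().stdout.strip()
    uuids = []
    for line in output.splitlines()[2:]:   # skip the two header lines
        if line.strip():
            uuids.append(line.split()[0])  # first column is the UUID
    return uuids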
Exemplo n.º 48
0
    def remove(self, name):
        """Undefine the libvirt secret whose uuid the given dict carries."""
        secret = name
        res = virsh.secret_undefine(secret['uuid'])
        if res.exit_status:
            raise Exception(str(res))
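    # Usage sketch (hypothetical caller): the method expects a dict carrying
    # the secret's uuid, e.g. obj.remove({'uuid': sec_uuid}).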
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                logging.debug("VM failed to start as expected."
                              "Error: %s" % str(e))
                pass
            else:
                raise error.TestFail("VM failed to start."
                                     "Error: %s" % str(e))

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for i in sec_uuid:
            virsh.secret_undefine(i, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but not."
                              "Error: %s", str(e))
                else:
                    logging.debug("VM failed to start as expected."
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
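                    # test.cancel() marks the result CANCEL instead of FAIL,
                    # appropriate here since the feature was dropped upstream.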
                else:
                    test.fail("VM failed to start."
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
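
# Note: virsh_dargs, used throughout these examples but defined outside the
# excerpts, conventionally looks like the following (an assumption based on
# common tp-libvirt usage):
# virsh_dargs = {'debug': True, 'ignore_status': True}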