Example #1
    def create_iscsi_pool():
        """
        Set up an iSCSI target and create an iSCSI pool.
        """
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user="",
            chap_passwd="",
            portal_ip=disk_src_host)
        # Define an iSCSI pool xml to create it
        pool_src_xml = pool_xml.SourceXML()
        pool_src_xml.host_name = pool_src_host
        pool_src_xml.device_path = iscsi_target
        poolxml = pool_xml.PoolXML(pool_type=pool_type)
        poolxml.name = pool_name
        poolxml.set_source(pool_src_xml)
        poolxml.target_path = "/dev/disk/by-path"

        # Create iSCSI pool.
        virsh.pool_destroy(pool_name, **virsh_dargs)
        cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
        libvirt.check_exit_status(cmd_result)
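
This helper relies on names from its enclosing test. A minimal sketch of that context, assuming the usual avocado-vt modules; the values below are illustrative, only the names come from the example:

from virttest import virsh
from virttest.libvirt_xml import pool_xml
from virttest.utils_test import libvirt

disk_src_host = "127.0.0.1"        # illustrative iSCSI portal IP
pool_src_host = disk_src_host
pool_type = "iscsi"
pool_name = "iscsi_pool_example"   # illustrative pool name
virsh_dargs = {'debug': True, 'ignore_status': True}

create_iscsi_pool()                # builds the pool XML and runs virsh pool-create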
Example #2
 def cleanup_pool(user_pool=False,
                  pool_name=pool_name,
                  pool_target=pool_target):
     """
     Clean up libvirt pool
     """
     if output_uri == "qemu:///session" or user_pool:
         virsh.pool_destroy(pool_name,
                            unprivileged_user=v2v_user,
                            debug=True)
         target_path = os.path.join("/home", v2v_user, pool_target)
         cmd = su_cmd + "'rm -rf %s'" % target_path
         process.system(cmd, verbose=True)
     else:
         pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
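
For reference, a rough sketch of the enclosing-scope names the user-session branch above relies on; the values are illustrative assumptions, only the names (and the modules the helper itself calls) come from the example:

import os

from avocado.utils import process
from virttest import virsh

v2v_user = 'v2vuser'                  # assumed unprivileged user owning the session pool
output_uri = 'qemu:///session'
su_cmd = "su - %s -c " % v2v_user     # note the trailing space before the quoted command
pool_name = 'v2v_dir_pool'            # illustrative pool name
pool_target = 'v2v_pool_dir'          # directory name under /home/<user>

cleanup_pool(user_pool=True)          # destroys the session pool and removes its target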
 def cleanup_pool(pool_name, pool_type, pool_target):
     """
     Delete vols, destroy the created pool and restore the env
     """
     if pool_type in ["dir", "netfs"]:
         vols = get_vol_list(pool_name)
         for vol in vols:
             result = virsh.vol_delete(vol, pool_name)
             if result.exit_status:
                 raise error.TestFail("Command virsh vol-delete failed:\n%s"
                                      % result.stderr)
             else:
                 logging.debug("Delete volume %s from pool %s", vol, pool_name)
     if not virsh.pool_destroy(pool_name):
         raise error.TestFail("Command virsh pool-destroy failed")
     else:
         logging.debug("Destroy pool %s", pool_name)
     if pool_type == "netfs":
         shutil.move("/etc/exports.virt", "/etc/exports")
         utils.run("service nfs restart")
         nfs_path = os.path.join(test.tmpdir, nfs_server_dir)
         if os.path.exists(nfs_path):
             shutil.rmtree(nfs_path)
     if pool_type == "logical":
         cmd = "pvs |grep vg_logical|awk '{print $1}'"
         pv = utils.system_output(cmd)
         utils.run("vgremove -f vg_logical")
         utils.run("pvremove %s" % pv)
     if pool_type in ["dir", "fs", "netfs"]:
         pool_target = os.path.join(test.tmpdir, pool_target)
         if os.path.exists(pool_target):
             shutil.rmtree(pool_target)
Example #5
def state_test():
    states = [ServiceState(), FileState(), DirState(), DomainState(),
              NetworkState(), PoolState(), SecretState(), MountState()]
    for state in states:
        state.backup()
    utils.run('echo hello > /etc/exports')
    virsh.start('virt-tests-vm1')
    virsh.net_autostart('default', '--disable')
    virsh.pool_destroy('mount')
    utils.run('rm /var/lib/virt_test/images/hello')
    utils.run('mkdir /var/lib/virt_test/images/hi')
    utils_libvirtd.Libvirtd().stop()
    utils_selinux.set_status('permissive')
    for state in states:
        lines = state.check(recover=True)
        for line in lines:
            print(line)
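
The pattern above is: backup() each state, deliberately change the host, then check(recover=True) to report and revert the drift. A minimal sketch with a single state object, using the same API as the example:

state = FileState()
state.backup()                             # snapshot the tracked files
utils.run('echo hello > /etc/exports')     # deliberately introduce drift
for line in state.check(recover=True):     # report each difference and revert it
    print(line)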
Example #7
    def remove(self, name):
        """
        Remove the target pool.

        :param name: Dict describing the pool to be removed.
        """
        pool = name
        if pool['state'] == 'running':
            res = virsh.pool_destroy(pool['name'])
            if not res:
                raise Exception(str(res))
        if pool['persistent'] == 'yes':
            res = virsh.pool_undefine(pool['name'])
            if res.exit_status:
                raise Exception(str(res))
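
A hypothetical call: the dictionary keys ('name', 'state', 'persistent') are the ones the method reads above, while the instance name and the values are made up for illustration:

pool_record = {'name': 'default', 'state': 'running', 'persistent': 'yes'}
pool_manager.remove(pool_record)   # destroys the running pool, then undefines it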
Example #8
 def cleanup_pool(pool_name, pool_target):
     """
     Destroy and undefine the pool, then delete the pool target
     """
     result = virsh.pool_destroy(pool_name, ignore_status=True)
     if not result:
         raise error.TestFail("Command virsh pool-destroy failed")
     result = virsh.pool_undefine(pool_name, ignore_status=True)
     if result.exit_status != 0:
         raise error.TestFail("Command virsh pool-undefine failed:\n%s" %
                              result.stderr.strip())
     try:
         logging.debug("Deleting the pool target: %s directory", pool_target)
         shutil.rmtree(pool_target)
     except OSError as detail:
         raise error.TestFail("Failed to delete the pool target directory"
                              " %s:\n %s" % (pool_target, detail))
Example #9
 def cleanup_pool(pool_name, pool_target):
     """
     Destroy and undefine the pool, then delete the pool target
     """
     result = virsh.pool_destroy(pool_name, ignore_status=True)
     if not result:
         raise error.TestFail("Command virsh pool-destroy failed")
     result = virsh.pool_undefine(pool_name, ignore_status=True)
     if result.exit_status != 0:
         raise error.TestFail("Command virsh pool-undefine failed:\n%s" %
                              result.stderr.strip())
     try:
         logging.debug("Deleting the pool target: %s directory",
                       pool_target)
         shutil.rmtree(pool_target)
     except OSError as detail:
         raise error.TestFail("Failed to delete the pool target directory"
                              " %s:\n %s" % (pool_target, detail))
def run(test, params, env):
    """
    Test pool command: virsh pool-autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target is "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name,
                   pool_type,
                   checkpoint,
                   expect_value="",
                   expect_error=False):
        """
        Check the pool after autostarting it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, whether the command is expected to fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    debug_msg = "Dir pool should be always active when libvirtd restart. "
                    debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.debug(debug_msg)
                else:
                    test.fail("Pool %s isn't %s as expected" %
                              (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint,
                              actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target
    }
    params.update(kwargs)
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(**params)
            # Rebuild the disk pool to remove any existing partition;
            # a leftover partition can sometimes cause pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type,
                                                    size="0.5",
                                                    vgname=vg_name,
                                                    lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                if not virsh.pool_undefine(pool_name):
                    test.fail("Undefine pool %s failed" % pool_name)
                if not virsh.pool_define(p_xml):
                    test.fail("Define pool %s from %s failed" %
                              (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name,
                                          ignore_status=True,
                                          debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall(
                            "//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool,
                                      readonly=ro_flag,
                                      ignore_status=True,
                                      debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name,
                       pool_type,
                       checkpoint='Autostart',
                       expect_value="yes",
                       expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name,
                       pool_type,
                       checkpoint="State",
                       expect_value="active",
                       expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool,
                                          extra="--disable",
                                          debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name,
                           pool_type,
                           checkpoint='Autostart',
                           expect_value="no",
                           expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name,
                           pool_type,
                           checkpoint='State',
                           expect_value="inactive",
                           expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(**params)
            if new_device:
                utlv.delete_local_disk(disk_type,
                                       vgname=vg_name,
                                       lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
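
The test above is driven entirely by the avocado-vt params dictionary. A minimal, hypothetical parameter set for the simplest case (a pre-defined 'dir' pool with no source-path update); the keys are taken from the params.get() calls above, the values are illustrative:

params = {
    'pool_name': 'temp_pool_1',
    'pool_type': 'dir',
    'pool_target': 'dir_pool_target',
    'pool_ref': 'name',
    'pre_def_pool': 'yes',
    'status_error': 'no',
}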
Example #11
        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
Example #12
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target, old_parts):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                logging.debug("VM failed to start as expected."
                              "Error: %s" % str(e))
                pass
            else:
                raise error.TestFail("VM failed to start."
                                     "Error: %s" % str(e))

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for i in sec_uuid:
            virsh.secret_undefine(i, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
Example #13
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Set up an iSCSI target with CHAP authentication.
        1.2. Define a secret for iSCSI target usage.
        1.3. Set the secret value.
    2. Create an iSCSI pool and volume (for volume-type disks).
    3. Create an iSCSI network disk XML.
    4. Attach the disk with the XML file and check the disk inside the VM.
    5. Detach the disk.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to match multiple times disk attaching requirements
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Build an iscsi pool XML and use it to create the pool
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # The iscsi-direct pool doesn't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and external snapshots
            # see more details at https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
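
As with the other run() tests, this one is configured through params. A minimal, hypothetical set for the plain iSCSI network-disk case without CHAP authentication; the keys come from the params.get() calls above, the values are illustrative:

params = {
    'main_vm': 'avocado-vt-vm1',
    'disk_type': 'network',
    'disk_source_protocol': 'iscsi',
    'disk_source_host': '127.0.0.1',
    'disk_target': 'vdb',
    'chap_auth': 'no',
    'status_error': 'no',
}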
def run(test, params, env):
    """
    Test rbd disk device.

    1. Prepare the test environment; destroy or suspend a VM.
    2. Prepare the disk image.
    3. Edit the disk XML and start the domain.
    4. Perform the test operation.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(),
                                       "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Read-only fs: check the error messages.
            # The command may still return success, but the
            # read-only messages can be found in the command output.
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1":
            "s1 --disk-only --no-metadata",
            "s2":
            "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3":
            "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4":
            "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
            (snapshot4_file, snapshot4_disk_file),
            "s5":
            "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
            (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(
            secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Skip parsing when the secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace once; the first column is the UUID
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")

    # Start with an empty value so we know whether to delete the ceph config file at the end of the test
    ceph_cfg = ""
    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # After libvirt 3.9.0, auth element can be put into source part.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel(
            "place auth in source is not supported in current libvirt version")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported-error message list so these kinds of tests can be skipped
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any dirty secrets left in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = 'sanlock'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
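            # Each entry maps a setup command to the error reported if it
            # fails; the OrderedDict keeps them in the required order: create
            # and init the lockspace file, fix ownership and SELinux labels,
            # create and init the disk resource lease, then register the
            # lockspace with the sanlock client.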
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
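                # The regex above extracts the UUID from the secret-define
                # output, which normally reads like "Secret <uuid> created"
                # (shown here only for illustration).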
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # Pass a different json string depending on the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
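                # For illustration, the final backing string is roughly:
                # json:{"file.driver":"rbd",
                #       "file.filename":"rbd:pool/image:mon_host=host:id=user:key=KEY"}
                # (placeholder values; the real ones come from the params above).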
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # For hot plug, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume-type disks
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            # After libvirt 3.9.0, the auth element can be placed in the source part.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start." "Error: %s" % str(details))
    finally:
        # Remove the ceph config file if it was created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
Exemple #15
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for the 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-sized vol in the pool (expect failure), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    iscsi_initiator = params.get("iscsi_initiator", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn",
                                                  "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if pool_type == "iscsi-direct":
            test.cancel("iSCSI-direct pool is not supported in current"
                        "libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
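        # The regex below assumes the usual three-column pool-list table,
        # e.g. (illustrative only):
        #   Name     State    Autostart
        #   -------------------------------
        #   default  active   yes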
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            test.fail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
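        # The regex above assumes the usual two-column vol-list table, e.g.
        # (illustrative only):
        #   Name         Path
        #   -------------------------------------------------
        #   temp_vol_1   /path/to/pool_target/temp_vol_1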
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            test.fail("Not find volume '%s' in pool '%s'." %
                      (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
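        # Worked example: actual=9.9, expected=10.0 gives
        # deviation = 100 - 100 * (9.9 / 10.0) = 1.0%, which passes when
        # error_percent is 1.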
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict containing the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # Converting from bytes to GiB can cause a deviation, and it should
            # not exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be held by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target,
        'iscsi_initiator': iscsi_initiator
    }
    params.update(kwargs)

    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(**params)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
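            # The two SubElement calls above add, inside <source>, roughly:
            #   <initiator>
            #     <iqn name='iqn.2018-07.com.virttest:pool.target'/>
            #   </initiator>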
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only applies to fs/disk/logical type pools
            # disk/fs pool: the prepare step already made a label and created a
            #               filesystem on the disk, so '--overwrite' is necessary
            # logical pool: building the pool fails if the VG already exists, BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name,
                                          build_option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name,
                                      debug=True,
                                      ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name,
                                          "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory-based storage pool is pretty much always active,
            # and so is a SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over-sized vol in the pool (expect failure), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For NFS type pools there's a bug (BZ#1077068) about allocating
            # volumes, and glusterfs pools don't support creating volumes, so
            # they are not tested here
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation,
                                             "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                result = virsh.pool_delete(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                option = "--inactive --type %s" % pool_type
                check_pool_list(pool_name, option)
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists." %
                              pool_target)
                # Starting a deleted pool is expected to fail
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(**params)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by tester or
    generated by dumpxml a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_create_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        raise error.TestNAError("Please replace %s with valid pool xml file" %
                                pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise error.TestFail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                # Remove <uuid>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                utils.run(cmd)
            else:
                # The transient pool will be gone after being destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                utils.run(cmd)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            raise error.TestError("Error occurred when prepare pool xml:\n %s"
                                  % e)
Exemple #17
0
def run_virsh_pool_create(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use an xml file from the test parameters to create a pool.
    2) Dump an existing pool's xml and use it to create a pool.
    3) Negative pool creation tests (invalid option, no file, invalid file,
       duplicate pool name, and creating a pool in readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameter
    # Back up the existing pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the existing pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s",
                      open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(pool_xml,
                                       option,
                                       ignore_status=True,
                                       debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        if undefine_exist_pool and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with an mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, \
                please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" % new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    # if no online hba cards on host, mark case failed
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True,
            debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(
            volume_name, pool_name, volume_capacity, allocation, frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {'type_name': "file", 'target_dev': target_device,
                       'target_bus': "virtio", 'source_file': test_unit,
                       'driver_name': "qemu", 'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml,
                                     debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by tester or
    generated by dumpxml a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        test.cancel("Please replace %s with valid pool xml file" %
                    pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                process.run(cmd, shell=True)
            else:
                # The transient pool will be gone once it is destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                process.run(cmd, shell=True)
            # Remove uuid
            cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
            process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when prepare pool xml:\n %s"
                       % details)
    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True
    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                test.fail("New created pool still use the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expect fail, but run successfully.")
            else:
                logging.debug("Command fail as expected")
    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by tester or
    generated by dumpxml a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        raise error.TestNAError("Please replace %s with valid pool xml file" %
                                pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise error.TestFail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'source_path': source_path,
        'source_name': source_name,
        'source_format': source_format
    }
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                # Remove <uuid>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (
                    new_pool_name, pool_xml_f)
                utils.run(cmd)
            else:
                # The transient pool will be gone once it is destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                utils.run(cmd)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
            raise error.TestError(
                "Error occurred when preparing the pool xml:\n %s" % e)
Exemple #21
0
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by tester or
    generated by dumpxml a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format,
              'emulated_image': "emulated-image", 'pool_target': pool_target,
              'pool_name': pool_name}
    params.update(kwargs)
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(**params)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                process.run(cmd, shell=True)
            else:
                # The transient pool will be gone once it is destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                process.run(cmd, shell=True)
            # Remove uuid
            cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
            process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(**params)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when prepare pool xml:\n %s"
                       % details)
    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True
    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                test.fail("New created pool still use the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expect fail, but run successfully.")
            else:
                logging.debug("Command fail as expected")
    finally:
        pvt.cleanup_pool(**params)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub("\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail("block device not found with scsi_%s",
                                          new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnn,
                     "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub("\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found in "
                                              "/dev/disk/by-path/ for %s" %
                                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail("Failed to create qcow2 on blk dev: %s"
                                          % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT*3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # Delete snapshots:
        #   - if disk-only, delete with --metadata and remove snapshot files
        #   - otherwise, delete the snapshot directly
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Exemple #23
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Destroy the pool
    (21) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (22) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Found volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail(
                "Volume '%s' not found in pool '%s'." %
                (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict that includes the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." % (check_point,
                                                            value))

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, status_error)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but that would fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable",
                                      ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # If the filesystem containing the directory is mounted, then the
        # directory will show as running, which means the local 'dir' pool
        # doesn't need to be started after restarting libvirtd
        if pool_type != "dir":
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step(20)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (21)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (22)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name. Pool name.
        :param p_type. Pool type.
        :param p_target. Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name. Pool name.
        :param target_encrypt_params. Encryption parameters in a dict.
        :param vol_params. Volume parameters dict.
        :return: True if create successfully.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path. volume path.
        :return: secret id if create successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list = virsh.secret_list().stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and keep the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded",
                                            "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        # Clean up dirty secrets in test environments if there are.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {
            "name": volume_name,
            "capacity": int(volume_cap),
            "allocation": int(volume_alloc),
            "format": volume_target_format,
            "path": volume_target_path,
            "label": volume_target_label,
            "capacity_unit": volume_cap_unit
        }
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update(
                {"secret": {
                    "type": secret_type,
                    "uuid": sec_encryption_uuid
                }})
        try:
            # If the libvirt version is lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume." "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {
                dev_attrs: volume_target_path
            }})
        disk_xml.driver = {
            "name": "qemu",
            "type": volume_target_format,
            "cache": "none"
        }
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {
                "encryption": v_xml.encryption.format,
                "secret": {
                    "type": v_xml.encryption.secret["type"],
                    "uuid": v_xml.encryption.secret["uuid"]
                }
            }
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                    **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                    **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail(
                        "In the hotplug scenario, the VM should start "
                        "successfully but did not. Error: %s" % str(e))
                else:
                    logging.debug(
                        "VM failed to start as expected. "
                        "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool (invalid option, no file, invalid file, duplicate
       pool name, and create pool under readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    if "--file" in option:
        pool_path = os.path.join(data_dir.get_data_dir(), 'images')
        dir_xml = """
<pool type='dir'>
  <name>%s</name>
  <target>
    <path>%s</path>
  </target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        xml_object = open(pool_xml, 'w')
        xml_object.write(dir_xml)
        xml_object.close()

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s", open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(
            pool_xml,
            option,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
           (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)

        # restore the undefined default pool
        if undefine_exist_pool: # and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh the pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build the pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but that would fail for now:
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected."
                                     % pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
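A condensed sketch of the polkit ACL pattern exercised above: the same virsh call is issued either through the restricted connection described by acl_dargs or through the default connection, and its exit status is checked against the expected outcome. The URI and user name below are placeholders.

from virttest import virsh
from virttest.utils_test import libvirt as utlv


def pool_start_maybe_acl(pool_name, use_acl, expect_error,
                         uri="qemu:///system", unprivileged_user="testacl"):
    """Start a pool either through the ACL-restricted connection or normally."""
    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}
    if use_acl:
        # Issue the command as the unprivileged user over the given URI.
        result = virsh.pool_start(pool_name, **acl_dargs)
    else:
        result = virsh.pool_start(pool_name, ignore_status=True)
    # Fails the test if the exit status does not match the expectation.
    utlv.check_exit_status(result, expect_error)
    return result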
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool(invalid option, no file, invalid file, duplicate
       pool name, and create pool under readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the existing pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s", open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(
            pool_xml,
            option,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        if undefine_exist_pool and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test needs the wwpn
    and wwnn of a vHBA on the host which is zoned and mapped to a SAN
    controller.

    Prerequisite:
    The host needs to have the wwpn and wwnn of a vHBA which is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online HBA cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception, e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh the pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step already labels the disk and creates a
        #               filesystem on it, so '--overwrite' is necessary
        # logical pool: pool build will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
        if build_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_build(pool_name, option,
                                      ignore_status=True)
            utlv.check_exit_status(result)

        # For an iSCSI pool, we need to discover targets before starting the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected."
                                     % pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(
                False, restore_selinux=cleanup_env[3])
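Step (3) above picks the pool-build option per pool type. A compact sketch of that decision, under the same assumptions stated in the comments (logical pools are skipped, disk/fs pools need '--overwrite'):

from virttest import virsh


def build_pool(pool_name, pool_type):
    """Run 'virsh pool-build' with the option appropriate for the pool type."""
    if pool_type == "logical":
        # The VG already exists after the prepare step, so building would
        # fail (see the BZ#1373711 note above); skip it.
        return None
    option = '--overwrite' if pool_type in ('disk', 'fs') else ''
    return virsh.pool_build(pool_name, option, ignore_status=True)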
Exemple #31
0
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh the pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step already labels the disk and creates a
        #               filesystem on it, so '--overwrite' is necessary
        # logical pool: pool build will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
        if build_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # For an iSCSI pool, we need to discover targets before starting the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
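The cleanup path above removes the LVM backing storage when cleanup_env[2] is set. An isolated sketch of that step, assuming avocado's process and lv_utils helpers and that the VG name equals the pool name as in the examples:

from avocado.utils import lv_utils, process


def cleanup_lvm_backing(vg_name):
    """Remove the VG created for a 'logical' pool and its backing PV."""
    # Find the PV that backs the test VG, then drop the VG and the PV.
    cmd = "pvs | grep %s | awk '{print $1}'" % vg_name
    pv_name = process.run(cmd, shell=True).stdout_text.strip()
    lv_utils.vg_remove(vg_name)
    process.run("pvremove %s" % pv_name, shell=True)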
     if virsh.domain_exists(vm_name):
         if active_snap or with_shallow:
             option = "--snapshots-metadata"
         else:
             option = None
         original_xml.sync(option)
     else:
         original_xml.define()
 except Exception, e:
     logging.error(e)
 for disk in snapshot_external_disks:
     if os.path.exists(disk):
         os.remove(disk)
 # Clean up libvirt pool, which may be created by 'set_vm_disk'
 if disk_type == 'volume':
     virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
 # Restore libvirtd conf and restart libvirtd
 libvirtd_conf.restore()
 libvirtd_utl.restart()
 if libvirtd_log_path and os.path.exists(libvirtd_log_path):
     os.unlink(libvirtd_log_path)
 # Clean up NFS
 try:
     if nfs_cleanup:
         utl.setup_or_cleanup_nfs(is_setup=False)
 except Exception, e:
     logging.error(e)
 # Clean up iSCSI
 try:
     for iscsi_n in list(set(emulated_iscsi)):
         utl.setup_or_cleanup_iscsi(is_setup=False,
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target and, for 'volume' type disks, an iscsi pool
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Build an iscsi pool XML to create the pool from
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout.strip():
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
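The CHAP branch above defines a libvirt secret and stores the base64-encoded password in it. A self-contained sketch of just that part, assuming the virttest secret_xml and virsh helpers; all argument values are placeholders and error handling is omitted:

import base64
import locale

from virttest import virsh
from virttest.libvirt_xml.secret_xml import SecretXML


def define_chap_secret(chap_user, chap_passwd, usage_target):
    """Define an iscsi-usage secret and store the CHAP password in it."""
    sec_xml = SecretXML("no", "yes")   # ephemeral="no", private="yes"
    sec_xml.auth_type = "chap"
    sec_xml.auth_username = chap_user
    sec_xml.usage = "iscsi"
    sec_xml.target = usage_target
    result = virsh.secret_define(sec_xml.xml, debug=True)
    # The uuid is the second field of "Secret <uuid> created".
    secret_uuid = result.stdout.strip().split()[1]
    encoding = locale.getpreferredencoding()
    value = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
    virsh.secret_set_value(secret_uuid, value, debug=True)
    return secret_uuid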
Exemple #34
0
        # Step (9)
        result = virsh.vol_create_as(vol_name, pool_name, "1048576",
                                     "1048576", "raw", ignore_status=True)
        if result.exit_status != 0:
            raise error.TestFail("Command virsh vol-create-as failed:\n%s" %
                                 result.stdout)
        else:
            logging.debug("Volume: %s successfully created on pool: %s",
                          vol_name, pool_name)

        if not check_vol_list(vol_name, pool_name, pool_target):
            raise error.TestFail("Volume %s is not found in the "
                                 "output of virsh vol-list" % vol_name)

        # Step (10)
        if not virsh.pool_destroy(pool_name):
            raise error.TestFail("Command virsh pool-destroy failed")
        else:
            logging.debug("Pool: %s destroyed successfully", pool_name)

        if not check_list_state(pool_name, "inactive"):
            raise error.TestFail("State of pool: %s marked as active"
                                 "instead of inactive" % pool_name)
        if not check_list_autostart(pool_name, "yes"):
            raise error.TestFail("Autostart of pool: %s marked as no"
                                 "instead of yes" % pool_name)

        # Step (11)
        if check_vol_list(vol_name, pool_name, pool_target):
            raise error.TestFail("Command virsh vol-list succeeded"
                                 " on an inactive pool")
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: Encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: Volume path.
        :return: Secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
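        # Assumption for illustration: "virsh secret-define" typically prints
        # "Secret <uuid> created"; the regex below is meant to pull out that
        # <uuid> token.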
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding)
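        # For illustration: base64-encoding the default "redhat" password
        # yields "cmVkaGF0".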
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partitions in the VM before the disk was attached.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded", "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update({"secret": {"type": secret_type, "uuid": sec_encryption_uuid}})
        try:
            # If the libvirt version is lower than 2.5.0, creating a luks
            # encrypted volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume."
                           "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {"encryption": v_xml.encryption.format,
                               "secret": {"type": v_xml.encryption.secret["type"],
                                          "uuid": v_xml.encryption.secret["uuid"]}}
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                        **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                        **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but not."
                              "Error: %s", str(e))
                else:
                    logging.debug("VM failed to start as expected."
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images are
                # no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start."
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
Exemple #36
0
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test needs a wwpn and
    wwnn of a vHBA on the host which is zoned and mapped to a SAN controller.

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online hba cards on the host, the test fails
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {
        'source_path': source_path,
        'source_name': source_name,
        'source_format': source_format,
        'pool_adapter_type': pool_adapter_type,
        'pool_adapter_parent': pool_adapter_parent,
        'pool_wwnn': pool_wwnn,
        'pool_wwpn': pool_wwpn
    }

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(lambda: nodedev.is_vhbas_added(old_vhbas),
                            _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iSCSI pool and volume (for 'volume' type disk)
    3. Create an iSCSI network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
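            # Assumption for illustration: the command typically reports
            # "Secret <uuid> created", so the uuid is expected at
            # whitespace-split index 1.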
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd.encode()).decode()
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
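                # The first matched tuple is usually the "Name  Path" header
                # row of the vol-list output, so the real volume entry is
                # expected at index 1 (illustrative assumption about the
                # output layout).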
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to become stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
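            # Builds an option string like
            # "snap2 --disk-only --diskspec vdb,file=<tmpdir>/snap2", i.e. an
            # external disk-only snapshot of the attached target device
            # (values here are illustrative defaults).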
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run(test, params, env):
    """
    Test pool command:virsh pool_autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target is "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name, pool_type, checkpoint,
                   expect_value="", expect_error=False):
        """
        Check the pool after autostart it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, expect command success or fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    error_msg = "Dir pool should be always active when libvirtd restart. "
                    error_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.error(error_msg)
                test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint, actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            # Remove the partition for the disk pool,
            # as sometimes a leftover partition will cause pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type, size="0.5",
                                                    vgname=vg_name, lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                if not virsh.pool_undefine(pool_name):
                    test.fail("Undefine pool %s failed" % pool_name)
                if not virsh.pool_define(p_xml):
                    test.fail("Define pool %s from %s failed" % (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name, ignore_status=True, debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall("//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, readonly=ro_flag,
                                      ignore_status=True, debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="yes", expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name, pool_type, checkpoint="State",
                       expect_value="active", expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool, extra="--disable", debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name, pool_type, checkpoint='Autostart',
                           expect_value="no", expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name, pool_type, checkpoint='State',
                           expect_value="inactive", expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, **kwargs)
            if new_device:
                utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except Exception as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Clean up NFS
        try:
            if replace_vm_disk and disk_source_protocol == "netfs":
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
Exemple #40
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-size vol in the pool (expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
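        # Each matched tuple is expected to map to the (Name, State, Autostart)
        # columns of the pool-list table (assumption about the output layout).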
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            raise error.TestFail("Not find volume '%s' in pool '%s'." %
                                 (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)
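        # Worked example (illustrative numbers): actual=9.95, expected=10.0
        # gives deviation = 100 - 100 * (9.95 / 10.0) = 0.5%, which passes a
        # 1% tolerance; actual=9.80 gives 2.0% and would fail it.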

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict, available values: Name, UUID,
                            State, Persistent, Autostart, Capacity, Allocation,
                            Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # As from bytes to GiB, could cause deviation, and it should not
            # exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so pool start would
    # report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['1M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, status_error)

        # Step (6)
        # Build the pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name,
                                      "--disable",
                                      ignore_status=True)
        utlv.check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory based storage pool is thus pretty much always active,
        # and so as the SCSI pool.
        if pool_type not in ["dir", 'scsi']:
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over size vol in pool(expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For NFS type pools, there's a bug (BZ#1077068) about allocating
        # volumes, and glusterfs pools don't support creating volumes, so
        # don't test them
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name,
                                         vol_capacity, vol_allocation, "raw")
            utlv.check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info['Capacity'])
            check_pool_info(pool_info, "Allocation", new_info['Allocation'])
            check_pool_info(pool_info, "Available", new_info['Available'])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
        except error.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
Exemple #41
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-size vol in the pool (expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get("ip_protocal", "ipv4")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.")

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict of the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                    UUID, State, Persistent, Autostart, Capacity, Allocation,
                    Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the newly
    # added disk may be in use by device-mapper, so pool start would report a
    # "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        "image_size": "1G",
        "pre_disk_vol": ["1M"],
        "source_name": source_name,
        "source_path": source_path,
        "source_format": source_format,
        "persistent": True,
        "ip_protocal": ip_protocal,
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory based storage pool is thus pretty much always active,
        # and so is the SCSI pool.
        if pool_type != "scsi" and (pool_type != "dir" or libvirt_version.version_compare(1, 2, 15)):
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over size vol in pool(expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For netfs type pools there is a bug (BZ#1077068) when allocating a
        # volume, and glusterfs pools do not support volume creation, so skip them
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw")
            check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info["Capacity"])
            check_pool_info(pool_info, "Allocation", new_info["Allocation"])
            check_pool_info(pool_info, "Available", new_info["Available"])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." % pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs)
        except error.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
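
For quick reference, the define/build/start/autostart/destroy/undefine flow exercised above can be reduced to a minimal sketch. This is only an illustration built from the same virsh wrappers the test already uses (assuming the avocado-vt import paths virttest.virsh and virttest.utils_test.libvirt); the pool name and XML path are placeholders, not values from the test.

from virttest import virsh
from virttest.utils_test import libvirt as utlv


def pool_lifecycle(pool_xml_path, pool_name):
    """Walk a pool through its whole lifecycle (illustrative sketch only)."""
    # Define the pool from a dumped XML file, then build and start it.
    utlv.check_exit_status(virsh.pool_define(pool_xml_path))
    utlv.check_exit_status(virsh.pool_build(pool_name, "", ignore_status=True))
    utlv.check_exit_status(virsh.pool_start(pool_name, ignore_status=True))
    utlv.check_exit_status(virsh.pool_autostart(pool_name, ignore_status=True))
    # An active pool must be destroyed (stopped) before it can be undefined.
    if not virsh.pool_destroy(pool_name):
        raise RuntimeError("pool-destroy %s failed" % pool_name)
    utlv.check_exit_status(virsh.pool_undefine(pool_name, ignore_status=True))
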
Exemple #42
0
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get(
        "external_disk_only_snapshot", "no")
    enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no")

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "The restriction forbidding relative paths or a bare file name "
            "was only added in libvirt-3.0.0")

    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to run blockcopy twice; the file created the first
        # time can be reused the second time if no dest_path is given.
        # This makes sure the image size equals the original disk size.
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                     "libvirt_daemons.log")
    libvirtd_conf_dict = {
        "log_filter": '"3:json 1:libvirt 1:qemu"',
        "log_outputs": '"1:file:%s"' % libvirtd_log_path
    }
    logging.debug("the libvirtd conf file content is :\n %s" %
                  libvirtd_conf_dict)
    libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict)

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here (str.strip() would drop characters,
        # not the suffix, so slice it off instead).
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: snapshot numbers.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disk such as 'cdrom'
            for disk in disks:
                if disk.device != 'disk':
                    disks.remove(disk)
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir,
                                        "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs
                  or 'name' in disk_xml.source.attrs
                  or 'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block'
                        or disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=True,
                        image_size="1G",
                        emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if enable_iscsi_auth:
                utils_secret.clean_up_secrets()
                setup_auth_enabled_iscsi_disk(vm, params)
                dest_path = os.path.join(tmp_dir, tmp_file)
            elif with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot or enable_iscsi_auth:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth
                            in ['0B', '0M']) and not utl.check_blockjob(
                                vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --byte option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could be in stdout and fail the
                # check, also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip())
                            or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                                "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recover VM may fail unexpectedly, we need using try/except to
        # proceed the following cleanup steps
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name, ignore_status=True).
                exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
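
Stripped of the parameter plumbing above, a basic positive blockcopy run comes down to one virsh call plus a sanity check on the destination file. The sketch below assumes the same virttest.virsh wrapper used in the test and a running transient guest; the disk target and destination path are placeholder values, not ones taken from the test.

import os

from virttest import virsh


def simple_blockcopy(vm_name, target="vda", dest_path="/var/tmp/copy.img"):
    """Copy one disk and pivot the guest onto the new image (sketch only)."""
    options = "--wait --verbose --pivot"
    result = virsh.blockcopy(vm_name, target, dest_path, options,
                             ignore_status=True, debug=True)
    if result.exit_status:
        raise RuntimeError("blockcopy failed: %s" % result.stderr)
    if not os.path.exists(dest_path):
        raise RuntimeError("copy %s was not created" % dest_path)
    # With --pivot the guest now uses dest_path; '--abort' would cancel the
    # job instead, and 'blockjob --info' can be used to inspect its state.
    return virsh.blockjob(vm_name, target, "--info", ignore_status=True)
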
Exemple #43
0
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Fail to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with internal snapshots.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s",
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
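
The snapshot handling in this example follows a common pattern: build one --diskspec per disk, create a disk-only (external) snapshot, and on cleanup delete only the metadata while removing the overlay files by hand. Below is a minimal sketch of that pattern using the same virsh/utlv wrappers; the single disk 'vdb', the snapshot name and the snapshot directory are assumed placeholder values.

import os

from virttest import virsh
from virttest.utils_test import libvirt as utlv


def disk_only_snapshot(vm_name, blk="vdb", snap_name="s1", snap_dir="/tmp"):
    """Create and clean up a disk-only external snapshot (illustrative)."""
    snap_file = os.path.join(snap_dir, "%s.%s" % (blk, snap_name))
    options = "%s --disk-only --diskspec %s,file=%s" % (snap_name, blk, snap_file)
    utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))
    # Disk-only snapshots are external, so only the libvirt metadata is
    # deleted here; the overlay file itself has to be removed manually.
    utlv.check_exit_status(virsh.snapshot_delete(vm_name, snap_name, "--metadata"))
    if os.path.exists(snap_file):
        os.remove(snap_file)
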
Exemple #44
0
                                     "1048576",
                                     "raw",
                                     ignore_status=True)
        if result.exit_status != 0:
            raise error.TestFail("Command virsh vol-create-as failed:\n%s" %
                                 result.stdout)
        else:
            logging.debug("Volume: %s successfully created on pool: %s",
                          vol_name, pool_name)

        if not check_vol_list(vol_name, pool_name, pool_target):
            raise error.TestFail("Volume %s is not found in the "
                                 "output of virsh vol-list" % vol_name)

        # Step (10)
        if not virsh.pool_destroy(pool_name):
            raise error.TestFail("Command virsh pool-destroy failed")
        else:
            logging.debug("Pool: %s destroyed successfully", pool_name)

        if not check_list_state(pool_name, "inactive"):
            raise error.TestFail("State of pool: %s marked as active"
                                 "instead of inactive" % pool_name)
        if not check_list_autostart(pool_name, "yes"):
            raise error.TestFail("Autostart of pool: %s marked as no"
                                 "instead of yes" % pool_name)

        # Step (11)
        if check_vol_list(vol_name, pool_name, pool_target):
            raise error.TestFail("Command virsh vol-list succeeded"
                                 " on an inactive pool")
Exemple #45
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool(except 'disk' type pool
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create a over size vol in pool(expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            test.fail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Found volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            test.fail(
                "Volume '%s' not found in pool '%s'." %
                (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict of the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                    UUID, State, Persistent, Autostart, Capacity, Allocation,
                    Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # Converting from bytes to GiB can cause deviation, which should
            # not exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." %
                          (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." %
                          (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the newly
    # added disk may be in use by device-mapper, so pool start would report a
    # "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
            # disk/fs pool: the prepare step already labels the disk and creates a
            #               filesystem on it, so '--overwrite' is necessary
            # logical pool: build will fail if the VG already exists, BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name, build_option, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name, debug=True, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name, "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory based storage pool is thus pretty much always active,
            # and so is the SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over size vol in pool(expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For NFS type pools there's a bug (BZ#1077068) about volume allocation,
            # and glusterfs pools don't support volume creation, so skip them here
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation, "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                    result = virsh.pool_delete(pool_name, ignore_status=True)
                    utlv.check_exit_status(result)
                    option = "--inactive --type %s" % pool_type
                    check_pool_list(pool_name, option)
                    if os.path.exists(pool_target):
                        test.fail("The target path '%s' still exist." %
                                  pool_target)
                        result = virsh.pool_start(pool_name, ignore_status=True)
                        utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
                result = virsh.pool_undefine(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
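
The lifecycle exercised in steps (6) through (24) above maps directly onto plain virsh sub-commands. The following is a minimal sketch of that flow for a directory pool, driven through subprocess rather than the avocado-vt wrappers; the pool name and target path are illustrative only.

import subprocess

def virsh_cmd(*args):
    """Run one virsh sub-command and return its CompletedProcess."""
    return subprocess.run(["virsh"] + list(args),
                          capture_output=True, text=True)

pool = "demo_dir_pool"           # hypothetical pool name
target = "/var/tmp/demo_pool"    # hypothetical target path

virsh_cmd("pool-define-as", pool, "dir", "--target", target)
virsh_cmd("pool-build", pool)        # creates the target directory
virsh_cmd("pool-start", pool)
virsh_cmd("pool-autostart", pool)
print(virsh_cmd("pool-list", "--all").stdout)
virsh_cmd("pool-destroy", pool)
virsh_cmd("pool-delete", pool)       # removes the target directory
virsh_cmd("pool-undefine", pool)
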
Exemple #46
0
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        vmxml_backup.sync("--snapshots-metadata")
Exemple #48
0
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s' % vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet,"
                                               "the error message is: %s",
                                               dev_attach_status.stderr)
                session.close()
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
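
The mount_and_dd() helper called above is not included in this listing; the sketch below shows what it is assumed to do (mount the freshly formatted disk inside the guest, write a small file with dd, and report success), using only the session.cmd_status_output() call already shown in the example.

# Hypothetical sketch of the mount_and_dd() helper used above: mount the
# disk inside the guest, write a small file with dd, report success.
def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    status, output = session.cmd_status_output(
        "mount %s %s" % (mount_disk, mount_point))
    if status != 0:
        return False
    status, output = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=1M count=100" % mount_point)
    return status == 0
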
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump an existing pool's XML and use it to create the pool.
    3) Negative cases: create a pool with an invalid option, no file, an
       invalid file, a duplicate pool name, and under readonly mode.
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    created_pool_path = False
    if "--file" in option:
        # As of commit id 'c5f6c685' in virt-install (v1.0.0), virt-install
        # will attempt to create a storage pool for ease of management of
        # volumes. Thus if the test was started with --install or by chance
        # an 'images' pool exists using get_data_dir(), then the attempt
        # to define/create this pool will fail since two pools cannot use
        # the same path.  Therefore, rather than use the data dir for our
        # test, let's just use 'tmp' dir and create an images pool there.
        pool_path = os.path.join(data_dir.get_tmp_dir(), 'images')
        if not os.path.exists(pool_path):
            os.mkdir(pool_path)
            created_pool_path = True
        dir_xml = """
<pool type='dir'>
  <name>%s</name>
  <target>
    <path>%s</path>
  </target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        xml_object = open(pool_xml, 'w')
        xml_object.write(dir_xml)
        xml_object.close()

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s",
                      open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(pool_xml,
                                       option,
                                       ignore_status=True,
                                       debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
           (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)

        # restore the undefined default pool
        if undefine_exist_pool:  # and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
        # If we created the pool_path, then remove it
        if created_pool_path:
            os.rmdir(pool_path)
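
The readonly negative case above passes readonly=ro_flag down to the virsh wrapper; with the plain CLI the same expectation is simply that pool-create fails over a read-only connection. A minimal sketch, assuming an illustrative XML path:

# Sketch: pool-create is expected to fail over a read-only libvirt
# connection (virsh -r); /tmp/pool.xml is an illustrative path.
import subprocess

result = subprocess.run(["virsh", "-r", "pool-create", "/tmp/pool.xml"],
                        capture_output=True, text=True)
assert result.returncode != 0, "pool-create should fail in readonly mode"
print(result.stderr.strip())
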
Exemple #50
0
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite :
    Host should have a vHBA associated with a mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml({
        "nodedev_parent": first_online_hba,
        "scsi_wwnn": scsi_wwnn,
        "scsi_wwpn": scsi_wwpn
    })
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(lambda: nodedev.is_mpath_devs_added(old_mpath_devs),
                        timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, \
                please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" %
            new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    # if no online hba cards on host, mark case failed
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(pool_name,
                                          pool_type,
                                          pool_target,
                                          pool_extra_args,
                                          ignore_status=True,
                                          debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(volume_name,
                                         pool_name,
                                         volume_capacity,
                                         allocation,
                                         frmt,
                                         "",
                                         debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {
            'type_name': "file",
            'target_dev': target_device,
            'target_bus': "virtio",
            'source_file': test_unit,
            'driver_name': "qemu",
            'driver_type': "raw"
        }
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
Exemple #51
0
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running 'parted mklabel': %s" % e)
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when preparing pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                logging.debug("VM failed to start as expected."
                              "Error: %s" % str(e))
                pass
            else:
                raise error.TestFail("VM failed to start."
                                     "Error: %s" % str(e))

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for i in sec_uuid:
            virsh.secret_undefine(i, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest object.
        :param target: Disk target dev in the VM.
        :param old_parts: Partition list collected before the disk was attached.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots for the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
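            # s4 and s5 pass explicit --diskspec files, so only the paths
            # derived from the first disk source (s1-s3) are collected here
            # for later cleanup.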
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Parse UUIDs only when the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and keep the first field (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create an /etc/ceph/ceph.conf file to suppress spurious warning messages.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start the vm and get all partitions in the vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of "unsupported" error messages used to skip such tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

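                # virsh secret-define prints "Secret <UUID> created";
                # extract the UUID field from that line.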
                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # Pass a different json string depending on the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # For hot plug, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume-type disks
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait for the vm to be running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
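        # The for/else falls through to test.fail only when none of the
        # unsupported messages matched; a match raises test.cancel instead.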
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start."
                      "Error: %s" % str(details))
    finally:
        # Remove the /etc/ceph/ceph.conf file if it exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
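For reference, the rbd disk path assembled in the example above follows the
pattern rbd:<pool>/<image>:mon_host=<host>[:id=<user>:key=<key>]. A minimal
sketch of that string construction (the helper name and sample values below
are illustrative, not part of the test):

def build_rbd_disk_path(image_spec, mon_host, auth_user=None, auth_key=None):
    """Sketch: build an rbd: path like the one the test passes to qemu-img."""
    path = "rbd:%s:mon_host=%s" % (image_spec, mon_host)
    if auth_user and auth_key:
        # Append cephx credentials only when both pieces are available.
        path += ":id=%s:key=%s" % (auth_user, auth_key)
    return path

# build_rbd_disk_path("libvirt-pool/rbd.img", "mon.example.com")
# -> 'rbd:libvirt-pool/rbd.img:mon_host=mon.example.com'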
Exemple #54
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-start; vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine.

    Create a libvirt npiv pool from an XML file.

    Pre-requisite:
    The host needs to have the wwpn and wwnn of a vHBA which is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online HBA cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
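The mount_and_dd helper used above is defined elsewhere in that test module.
A rough, hypothetical sketch of what such a helper might do, assuming an
aexpect-style session object with cmd_status_output (the mount point, dd size
and exact commands are assumptions, not the original implementation):

def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    """Hypothetical sketch: mount the disk, write to it with dd, return success."""
    cmd = ("mount %s %s && dd if=/dev/zero of=%s/testfile bs=1M count=10 && sync"
           % (mount_disk, mount_point, mount_point))
    status, _ = session.cmd_status_output(cmd, timeout=120)
    return status == 0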
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel("Forbid using relative path or file name only is added since libvirt-3.0.0")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" % (target,
                                                                  vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to run blockcopy twice; the file created the first
        # time can be reused the second time if no dest_path is given.
        # This makes sure the image size equals the original disk size.
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: Extension (e.g. ".raw") appended to dest_path
        :param expect: Expect image format
        """
        # And now, because QemuImg will add the extension for us,
        # we have to strip it from the destination path here.
        path_noext = dest_path
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" % (dest_path,
                                                               expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error or
        hangs on the state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disks such as 'cdrom'; iterate over a copy since
        # the list is modified inside the loop.
        for disk in disks[:]:
            if disk.device != 'disk':
                disks.remove(disk)
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or
              'name' in disk_xml.source.attrs or
              'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with the desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on.
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
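            # --nvram also removes any UEFI nvram file so undefine cannot fail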
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() + cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target, "bandwidth",
                                              bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using the --bytes option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() + cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout, from a failure (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. Since stdout may
                # only show "Now in mirroring phase" and miss that marker,
                # also check the libvirtd log to confirm the abort.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to
        # make sure the following cleanup steps still run
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shut down a VM that has a
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
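                # If the domain still exists, sync the original XML back
                # (passing --snapshots-metadata when snapshots may be
                # present); otherwise re-define it from the saved XML.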
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up the libvirt pool, which may have been created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a moment before the next iteration
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
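            # 'systemctl reset-failed' clears any failed state so the
            # restart below is not rejected by the unit's start limit.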
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass