Example #1
def run(test, params, env):
    """
    Test the virsh pool commands with ACL: initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]
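    # utils_test.libvirt.define_pool() below is passed this list and is
    # expected to flip the matching flag for any backing storage it sets up,
    # so the finally block knows what to tear down.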
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }
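    # When one of the *_acl flags above is set, the matching virsh call below
    # receives these dargs, i.e. it runs as the unprivileged user against the
    # given URI so that polkit ACL rules decide whether it succeeds.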

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
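        # For reference, the `virsh pool-list` output parsed here looks
        # roughly like this (column spacing varies by libvirt version):
        #
        #   Name                 State      Autostart
        #   -------------------------------------------
        #   temp_pool_1          inactive   no
        #
        # so re.findall() yields tuples like ('temp_pool_1', 'inactive', 'no').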
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected: pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' was not found." % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
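            # `pvs | grep <vg> | awk '{print $1}'` prints the PV device(s)
            # backing the volume group, so they can be pvremove'd after
            # vg_remove() tears the VG down.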
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
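
The repeated `"yes" == params.get(...)` comparisons in the example above could
be collapsed with a small helper; a minimal sketch (the `_flag` name is an
assumption, not part of the framework):

def _flag(params, key):
    # Cartesian-config booleans arrive as the strings "yes"/"no";
    # treat "yes" as True and anything else as False.
    return params.get(key, "no") == "yes"

# e.g. define_acl = _flag(params, "define_acl")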
Example #2
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(source_type,
                                                        options,
                                                        ignore_status=True,
                                                        debug=True,
                                                        readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command outout:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
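
For reference, the wrapper above ends up running a virsh command of roughly
this shape (host and port values are placeholders; the exact form depends on
the options passed in):

# virsh find-storage-pool-sources-as netfs localhost
# virsh find-storage-pool-sources-as iscsi 127.0.0.1 3260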
Example #3
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert bytes to GiB (1 GiB = 1024 ** 3 = 1073741824 bytes)
    file_size = int(file_size) / 1073741824

    remote_host = params.get("remote_ip", "REMOTE.EXAMPLE")
    local_host = params.get("local_ip", "LOCAL.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("remote_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to the VM if the disk count is greater than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")

    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        added_disks_list = []
        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated_iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                  emulated_image="emulated_iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                raise error.TestError("Create VG %s on %s failed."
                                      % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = all_disks.keys()
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if abnormal_type != "not_exist_file":
            for disk, size in all_disks.items():
                if disk == file_path:
                    rdm.create_image("file", disk, size, None, None)
                else:
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk))

        fail_flag = False
        try:
            logging.debug("Start migration...")
            copied_migration(vms, params)
            if migrate_again:
                fail_flag = True
                raise error.TestFail("Migration succeed, but not expected!")
            else:
                return
        except error.TestFail:
            if not migrate_again:
                raise

            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in all_disks.items():
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None, None)
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate again to confirm the failure reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except:
                pass    # keep going so the iscsi device cleanup below still runs
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi2")
Example #5
        af_fstrim_cpy = get_disk_capacity(disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                raise error.TestFail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                raise error.TestFail("Automatical 'fstrims' didn't work.")
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except error.TestError, detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except error.TestError, detail:
                logging.debug(str(detail))
            utils.run("pvremove -f %s" % discard_device, ignore_status=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
    try:
        cmd_result = virsh.find_storage_pool_sources(
            source_type,
            srcSpec.name,
            ignore_status=True,
            debug=True,
            unprivileged_user=unprivileged_user,
            uri=uri,
            readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command output:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
Example #7
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported after a certain libvirt version.)
    These are test cases for it.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Backup VM XML file
    backupvmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    disk_type = params.get("disk_type", "file")
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            if not discard_device.count("/DEV/EXAMPLE"):
                device_path = discard_device
            else:
                discard_device = create_iscsi_device()
                device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
                            flagstr="--persistent", ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            channelfile = create_channel_xml(vm_name)
            virsh.attach_device(domain_opt=vm_name, file_opt=channelfile,
                                flagstr="--persistent", ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(vm_name))

        # Verify attached device in vm
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()
        af_disks = get_vm_disks(vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            raise error.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupy space on the new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        bf_cpy = get_disk_capacity(disk_type, imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(fstrim_type, vm, status_error)
        af_fstrim_cpy = get_disk_capacity(disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                raise error.TestFail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                raise error.TestFail("Automatical 'fstrims' didn't work.")
    finally:
        if vm.is_alive():
            vm.destroy()
        try:
            backupvmxml.sync()
        except xcepts.LibvirtXMLError:
            # TODO: provide another way to clean it up
            pass    # Do following steps anyway
        if disk_type == "block":
            try:
                lv_utils.vg_remove("vgthin")
            except error.TestError:
                pass
            utils.run("pvremove -f %s" % discard_device, ignore_status=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
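
sig_delta() above is defined elsewhere in this module; judging by its use, it
reports whether two capacity readings differ significantly. A minimal sketch of
the idea (the body and the 10% threshold are assumptions, not the real helper):

def sig_delta(before, after, threshold=0.1):
    # True when the readings differ by more than `threshold` as a
    # fraction of the first reading.
    before, after = float(before), float(after)
    return abs(before - after) / max(before, 1.0) > threshold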
Example #8
def run(test, params, env):
    """
    Test the virsh pool commands with ACL: initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected: pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' was not found." % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step has already labeled the disk and
        #               created a filesystem, so '--overwrite' is necessary
        # logical pool: build will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

        # For iSCSI pools, we need to discover targets before starting the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])


def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """

    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "localhost")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    if source_host == "localhost":
        if source_type == "netfs":
            # Set up nfs
            utils_test.libvirt.setup_or_cleanup_nfs(True)
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            cleanup_iscsi = True
            if source_type == "logical":
                # Create VG by using iscsi device
                lv_utils.vg_create(vg_name, iscsi_device)
                cleanup_logical = True

    # Run virsh cmd
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(source_type,
                                                        options,
                                                        ignore_status=True,
                                                        debug=True,
                                                        readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command outout:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
Example #10
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except 'disk' type pools)
        For 'fs' type pools, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Destroy the pool
    (21) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (22) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected: pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' was not found." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Find volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail(
                "Not find volume '%s' in pool '%s'." %
                (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." % (check_point,
                                                            value))
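
    # For reference, `_pool.pool_info(name)` used in step (16) returns a dict
    # keyed by the fields listed above (values are illustrative):
    #   {'Name': 'temp_pool_1', 'UUID': '...', 'State': 'running',
    #    'Persistent': 'yes', 'Autostart': 'no', ...}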

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, status_error)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable",
                                      ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # If the filesystem containing the directory is mounted, the
        # directory shows as running, so a local 'dir' pool doesn't need
        # to be started again after libvirtd restarts
        if pool_type != "dir":
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step(20)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (21)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (22)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
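
# For reference, the pool XML that steps (3)-(5) above dump and re-define
# looks, for the default 'dir' type pool, roughly like the sketch below.
# The name and target path are the defaults assumed in this test, not
# values captured from a real run.
DIR_POOL_XML_SKETCH = """
<pool type='dir'>
  <name>temp_pool_1</name>
  <target>
    <path>/tmp/temp_pool_1</path>
  </target>
</pool>
"""
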
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources

    1. Prepare env to provide source storage if using localhost:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
       4). Prepare srcSpec xml file if not given
    2. Find the pool sources by running virsh cmd
    """

    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "localhost")
    srcSpec = params.get("source_Spec", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    # Prepare source storage
    if source_host == "localhost":
        if source_type == "netfs":
            # Set up nfs
            utils_test.libvirt.setup_or_cleanup_nfs(True)
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            cleanup_iscsi = True
            if source_type == "logical":
                # Create vg by using iscsi device
                lv_utils.vg_create(vg_name, iscsi_device)
                cleanup_logical = True

    # Prepare srcSpec xml; check the original value before rebinding srcSpec
    if not srcSpec or srcSpec == "INVALID.XML":
        invalid_xml = srcSpec == "INVALID.XML"
        srcSpec = xml_utils.TempXMLFile()
        if invalid_xml:
            src_xml = "<invalid><host name='#@!'/><?source>"
        else:
            src_xml = "<source><host name='%s'/></source>" % source_host
        srcSpec.write(src_xml)
        srcSpec.flush()
    logging.debug("srcSpec file content:\n%s", file(srcSpec.name).read())

    if ro_flag:
        logging.debug("Readonly mode test")

    # Run virsh cmd
    try:
        cmd_result = virsh.find_storage_pool_sources(
            source_type,
            srcSpec_file,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command output:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
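
# The call above maps onto the virsh CLI roughly as (paths illustrative):
#
#   virsh find-storage-pool-sources netfs /tmp/srcSpec.xml
#
# where srcSpec.xml carries "<source><host name='localhost'/></source>".
# On success virsh prints a <sources> document; a sketch of the netfs
# output (not captured from a real run):
#
#   <sources>
#     <source>
#       <host name='localhost'/>
#       <dir path='/var/lib/libvirt/nfs'/>
#       <format type='nfs'/>
#     </source>
#   </sources>
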
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """

    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "localhost")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    if source_host == "localhost":
        if source_type == "netfs":
            # Set up nfs
            utils_test.libvirt.setup_or_cleanup_nfs(True)
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            cleanup_iscsi = True
            if source_type == "logical":
                # Create VG by using iscsi device
                lv_utils.vg_create(vg_name, iscsi_device)
                cleanup_logical = True

    # Run virsh cmd
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(
            source_type,
            options,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command outout:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
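
# Unlike find-storage-pool-sources, the '-as' variant takes the source
# description as positional arguments rather than an XML file; the call
# above corresponds roughly to (host illustrative):
#
#   virsh find-storage-pool-sources-as netfs localhost
#
# with an optional port (and any 'extra_options') appended.
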
Example #13
0
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations
    (only supported after a specific libvirt version).
    These are test cases for it.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Backup VM XML file
    backupvmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    disk_type = params.get("disk_type", "file")
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            if not discard_device.count("/DEV/EXAMPLE"):
                device_path = discard_device
            else:
                discard_device = create_iscsi_device()
                device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(domain_opt=vm_name,
                            file_opt=xmlfile,
                            flagstr="--persistent",
                            ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            channelfile = create_channel_xml(vm_name)
            virsh.attach_device(domain_opt=vm_name,
                                file_opt=channelfile,
                                flagstr="--persistent",
                                ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(vm_name))

        # Verify attached device in vm
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()
        af_disks = get_vm_disks(vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            raise error.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupy space on the new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        bf_cpy = get_disk_capacity(disk_type,
                                   imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(fstrim_type, vm, status_error)
        af_fstrim_cpy = get_disk_capacity(disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug(
            "\nBefore occupying disk:%s\n"
            "After occupying disk:%s\n"
            "After fstrim operation:%s", bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                raise error.TestFail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                raise error.TestFail("Automatical 'fstrims' didn't work.")
    finally:
        if vm.is_alive():
            vm.destroy()
        try:
            backupvmxml.sync()
        except xcepts.LibvirtXMLError:
            # TODO: provide another way to clean it up
            pass  # Do following steps anyway
        if disk_type == "block":
            try:
                lv_utils.vg_remove("vgthin")
            except error.TestError:
                pass
            utils.run("pvremove -f %s" % discard_device, ignore_status=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
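
# For reference, the disk XML that create_disk_xml() is expected to build
# hangs the discard setting off the <driver> element; a minimal sketch for
# the file-backed case (values illustrative, not from a real run):
#
# <disk type='file' device='disk'>
#   <driver name='qemu' type='raw' discard='unmap'/>
#   <source file='/tmp/discard_test'/>
#   <target dev='vdb' bus='virtio'/>
# </disk>
#
# libvirt accepts discard='unmap' or discard='ignore', matching the
# 'discard_type' parameter above.
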
Example #14
0
def run(test, params, env):
    """
    Test the virsh pool commands with acl: initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at last to restore the env.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' does not exist."
                                 % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to let the case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to let the case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step already labeled the disk and created
        #               a filesystem on it, so '--overwrite' is necessary
        # logical pool: build fails if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to let the case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

        # For an iSCSI pool, we need to discover targets before starting it
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to let the case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected."
                                     % pool_name)
            else:
                # Redo under negative case to let the case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool; start the pool first
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(
                False, restore_selinux=cleanup_env[3])
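
# When the *_acl flags are set, the virsh wrappers above execute the
# command as the unprivileged user from acl_dargs; the shell equivalent
# is roughly (user name illustrative):
#
#   su - testacl -c "virsh -c qemu:///system pool-start temp_pool_1"
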
Example #15
0
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug(
            "\nBefore occupying disk:%s\n"
            "After occupying disk:%s\n"
            "After fstrim operation:%s", bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                raise error.TestFail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                raise error.TestFail("Automatical 'fstrims' didn't work.")
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except error.TestError, detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except error.TestError, detail:
                logging.debug(str(detail))
            utils.run("pvremove -f %s" % discard_device, ignore_status=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)