Example #1
    def restore(self, pool):
        # 'pool' is a saved pool-state dict (keys: name, persistent, state,
        # inactive xml), typically captured earlier from current_state
        name = pool['name']
        pools = self.current_state
        if name in pools:
            self.remove(pools[name])

        # Dump the saved inactive XML to a temporary file for virsh to read
        pool_file = tempfile.NamedTemporaryFile(delete=False)
        fname = pool_file.name
        pool_file.writelines(pool['inactive xml'])
        pool_file.close()

        try:
            if pool['persistent'] == 'yes':
                res = virsh.pool_define(fname)
                if res.exit_status:
                    raise Exception(str(res))
                if pool['state'] == 'running':
                    res = virsh.pool_start(name)
                    if res.exit_status:
                        raise Exception(str(res))
            else:
                # Transient pools are created (defined + started) in one step
                res = virsh.pool_create(fname)
                if res.exit_status:
                    raise Exception(str(res))
        finally:
            # Remove the temp XML file: NamedTemporaryFile(delete=False)
            # would otherwise leave it behind. This replaces the original
            # Python 2 style "except Exception, e: raise e", which only
            # re-raised and leaked the file.
            os.unlink(fname)
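The dict consumed by restore() is assumed to be a snapshot taken earlier from self.current_state; a minimal, hypothetical example of the shape it expects and how it might be called:

    # Hypothetical saved state -- the keys match what restore() reads above.
    saved_pool = {
        'name': 'default',
        'persistent': 'yes',
        'state': 'running',
        'inactive xml': "<pool type='dir'>...</pool>",
    }
    backup.restore(saved_pool)  # 'backup' is a hypothetical instance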
Example #2
    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
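The helper relies on a virsh_dargs dict defined in the enclosing test; a minimal usage sketch (the virsh_dargs values here are an assumption, not taken from the source):

    virsh_dargs = {'debug': True, 'ignore_status': True}
    create_pool("temp_pool_1", "dir", os.path.join(test.tmpdir, "dir_pool"))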
Example #3
    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
Example #4
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools).
        For 'fs' type pools, cover the --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For a 'dir' type pool, touch a file under the target path and refresh
         again so the new file shows up in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an oversized vol in the pool (expect failure), then check these
         values
    (21) Undefine the pool; this should fail as the pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it doesn't."
                                 % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            raise error.TestFail("Volume '%s' not found in pool '%s'." %
                                 (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        # Use the absolute deviation, so values above 'expected' are also
        # bounded by the tolerance
        deviation = abs(100 - (100 * (float(actual) / float(expected))))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)
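    # Worked example: is_in_range(9.9, 10.0, 1) gives
    # deviation = abs(100 - 100 * (9.9 / 10.0)) = 1.0%, within a 1% tolerance.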

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict with the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # The conversion from bytes to GiB can introduce a deviation,
            # which should not exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be claimed by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['1M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name,
                                      "--disable",
                                      ignore_status=True)
        utlv.check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it checks whether any of the storage
        # pools have been activated externally; if so, it marks the
        # pool as active. This is independent of autostart.
        # A directory based storage pool is thus pretty much always active,
        # and so is the SCSI pool.
        if pool_type not in ["dir", 'scsi']:
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over size vol in pool(expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For NFS type pools there's a bug (BZ#1077068) in volume allocation,
        # and glusterfs pools don't support creating volumes, so skip both
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name,
                                         vol_capacity, vol_allocation, "raw")
            utlv.check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info['Capacity'])
            check_pool_info(pool_info, "Allocation", new_info['Allocation'])
            check_pool_info(pool_info, "Available", new_info['Available'])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
        except error.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
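The test above is driven by the Cartesian params dict; a minimal, hypothetical parameter set for a 'dir' type pool run (keys mirror the params.get() calls at the top of the test):

    params = {
        'pool_name': 'temp_pool_1',
        'pool_type': 'dir',
        'pool_target': 'dir_pool_target',  # relative, so it lands under test.tmpdir
        'vol_name': 'temp_vol_1',
        'status_error': 'no',
    }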
Example #5
def run(test, params, env):
    """
    Test pool command: virsh pool-autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target == "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name, pool_type, checkpoint,
                   expect_value="", expect_error=False):
        """
        Check the pool after autostart it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, expect command success or fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    error_msg = "Dir pool should be always active when libvirtd restart. "
                    error_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.error(error_msg)
                test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint, actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            # Remove the partition for disk pool
            # Sometimes the partition will cause pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type, size="0.5",
                                                    vgname=vg_name, lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                # pool_undefine/pool_define return CmdResult objects, which
                # are always truthy; check exit_status instead
                if virsh.pool_undefine(pool_name).exit_status:
                    test.fail("Undefine pool %s failed" % pool_name)
                if virsh.pool_define(p_xml).exit_status:
                    test.fail("Define pool %s from %s failed" % (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name, ignore_status=True, debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall("//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, readonly=ro_flag,
                                      ignore_status=True, debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="yes", expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name, pool_type, checkpoint="State",
                       expect_value="active", expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool, extra="--disable", debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name, pool_type, checkpoint='Autostart',
                           expect_value="no", expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name, pool_type, checkpoint='State',
                           expect_value="inactive", expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, **kwargs)
            if new_device:
                utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        # test.fail is a method, not an exception class; catch avocado's
        # TestFail exception (from avocado.core import exceptions) instead
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
Example #6
def run(test, params, env):
    """
    Test pool command: virsh pool-autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target == "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name,
                   pool_type,
                   checkpoint,
                   expect_value="",
                   expect_error=False):
        """
        Check the pool after autostart it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, expect command success or fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    debug_msg = "Dir pool should be always active when libvirtd restart. "
                    debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.debug(debug_msg)
                else:
                    test.fail("Pool %s isn't %s as expected" %
                              (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint,
                              actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target
    }
    params.update(kwargs)
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(**params)
            # Remove the partition for disk pool
            # Sometimes the partition will cause pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type,
                                                    size="0.5",
                                                    vgname=vg_name,
                                                    lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                # pool_undefine/pool_define return CmdResult objects, which
                # are always truthy; check exit_status instead
                if virsh.pool_undefine(pool_name).exit_status:
                    test.fail("Undefine pool %s failed" % pool_name)
                if virsh.pool_define(p_xml).exit_status:
                    test.fail("Define pool %s from %s failed" %
                              (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name,
                                          ignore_status=True,
                                          debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall(
                            "//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool,
                                      readonly=ro_flag,
                                      ignore_status=True,
                                      debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name,
                       pool_type,
                       checkpoint='Autostart',
                       expect_value="yes",
                       expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name,
                       pool_type,
                       checkpoint="State",
                       expect_value="active",
                       expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool,
                                          extra="--disable",
                                          debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name,
                           pool_type,
                           checkpoint='Autostart',
                           expect_value="no",
                           expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name,
                           pool_type,
                           checkpoint='State',
                           expect_value="inactive",
                           expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(**params)
            if new_device:
                utlv.delete_local_disk(disk_type,
                                       vgname=vg_name,
                                       lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
Example #7
    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
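For reference, the source XML assembled in the create_by_xml branch follows libvirt's RBD pool schema; a sketch with placeholder values (the host, pool, and secret names here are assumptions):

    <pool type='rbd'>
      <name>temp_pool_1</name>
      <source>
        <name>libvirt-rbd-pool</name>
        <host name='mon1.example.com' port='6789'/>
        <auth type='ceph' username='admin'>
          <secret usage='ceph_cluster'/>
        </auth>
      </source>
    </pool>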
Example #8
    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
Example #9
def run(test, params, env):
    """
    Test the virsh pool commands with acl; initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after starting it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the env.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it doesn't."
                                 % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but that fails for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected."
                                     % pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
Example #10
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools).
        For 'fs' type pools, cover the --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For a 'dir' type pool, touch a file under the target path and refresh
         again so the new file shows up in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an oversized vol in the pool (expect failure), then check these
         values
    (21) Undefine the pool; this should fail as the pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get("ip_protocal", "ipv4")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.")

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Did not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it does not." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail("Volume '%s' not found in pool '%s'." % (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict, available values: Name, UUID,
                    State, Persistent, Autostart, Capacity, Allocation, Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        "image_size": "1G",
        "pre_disk_vol": ["1M"],
        "source_name": source_name,
        "source_path": source_path,
        "source_format": source_format,
        "persistent": True,
        "ip_protocal": ip_protocal,
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory-based storage pool is pretty much always active,
        # and so is the SCSI pool.
        if pool_type != "scsi" and (pool_type != "dir" or libvirt_version.version_compare(1, 2, 15)):
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an oversized vol in the pool (expect failure), then check the
        # pool's 'Capacity', 'Allocation' and 'Available'.
        # For netfs pools there is a volume allocation bug (BZ#1077068), and
        # gluster pools do not support volume creation, so skip them here.
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw")
            check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info["Capacity"])
            check_pool_info(pool_info, "Allocation", new_info["Allocation"])
            check_pool_info(pool_info, "Available", new_info["Available"])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." % pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
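
A minimal standalone sketch of the pool-list parsing used by check_pool_list()
above. The sample table below is an assumption modeled on typical 'virsh
pool-list' output; only the regex and the membership test mirror the test code.

import re

sample_output = """ Name          State    Autostart
-----------------------------------------
 default       active   yes
 temp_pool_1   inactive no
"""
# Each match is a (name, state, autostart) tuple; the header row matches too,
# so callers only test membership of the pool name in the first column.
rows = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", sample_output)
print(any("temp_pool_1" in row[0] for row in rows))  # True
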
Example no. 11
0
def run(test, params, env):
    """
    Test the virsh pool commands with ACL: initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Did not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it does not." % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step already labels the disk and creates a
        #               filesystem on it, so '--overwrite' is necessary
        # logical pool: building will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
        if build_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # For iSCSI pools, we need to discover targets before starting the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
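
A hedged sketch (not part of the test above) of the "redo under negative
case" pattern this example repeats for every step: when an ACL-restricted
call is expected to fail, the same command is re-run over the default (root)
connection so later steps still find the state they need. The import paths
are the ones these tests use; run_step_with_redo is a hypothetical helper.

from virttest import virsh
from virttest.utils_test import libvirt as utlv

def run_step_with_redo(virsh_func, name, use_acl, expect_error, acl_dargs):
    # Run restricted (through the polkit ACL connection) or unrestricted,
    # then verify the outcome matches the expectation
    if use_acl:
        result = virsh_func(name, **acl_dargs)
    else:
        result = virsh_func(name, ignore_status=True)
    utlv.check_exit_status(result, expect_error)
    if expect_error:
        # Redo unrestricted so the case can continue past the negative step
        result = virsh_func(name, ignore_status=True)
        utlv.check_exit_status(result)
    return result

For example, the pool-start step above would collapse to
run_step_with_redo(virsh.pool_start, pool_name, start_acl, start_error,
acl_dargs).
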
Example no. 12
0
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump an existing pool to create the pool.
    3) Negatively create a pool (invalid option, no file, invalid file,
       duplicate pool name, and create a pool in readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameter
    # Back up the existing pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the existing pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s", open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(
            pool_xml,
            option,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        if undefine_exist_pool and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
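
A minimal sketch of the backup/restore pattern this example relies on,
assuming the same virttest modules imported by these tests; error handling
and the destructive steps themselves are omitted.

from virttest import libvirt_xml, virsh

def with_pool_backup(pool_name):
    # Dump the pool definition to a temporary XML file before touching it
    backup_xml = libvirt_xml.PoolXML.backup_xml(pool_name)
    try:
        pass  # ... destructive operations on the pool ...
    finally:
        # Re-register the original definition on the way out
        virsh.pool_define(backup_xml)
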
Example no. 13
0
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump an existing pool to create the pool.
    3) Negatively create a pool (invalid option, no file, invalid file,
       duplicate pool name, and create a pool in readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameter
    # Back up the existing pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    if "--file" in option:
        pool_path = os.path.join(data_dir.get_data_dir(), 'images')
        dir_xml = """
<pool type='dir'>
  <name>%s</name>
  <target>
    <path>%s</path>
  </target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        xml_object = open(pool_xml, 'w')
        xml_object.write(dir_xml)
        xml_object.close()

    # Delete the existing pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s", open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(
            pool_xml,
            option,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
           (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)

        # restore the undefined default pool
        if undefine_exist_pool: # and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
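
A short sketch of the state snapshot used above: virsh.pool_state_dict()
(called at the top of this example) returns a per-pool dict whose
'autostart' key the recovery path reads, so autostart can be restored from
the snapshot. This is illustrative, not a drop-in helper.

from virttest import virsh

saved = virsh.pool_state_dict()
# ... test steps that may toggle autostart on existing pools ...
for name, state in saved.items():
    if state.get('autostart'):
        virsh.pool_autostart(name, ignore_status=True)
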
Example no. 14
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Destroy the pool
    (21) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (22) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Did not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it does not." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Found volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail(
                "Volume '%s' not found in pool '%s'." %
                (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict, available values: Name, UUID,
                    State, Persistent, Autostart, Capacity, Allocation, Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." % (check_point,
                                                            value))

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, status_error)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable",
                                      ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # If the filesystem containing the directory is mounted, then the
        # directory will show as running, which means the local 'dir' pool
        # doesn't need to be started after restarting libvirtd
        if pool_type != "dir":
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step(20)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (21)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (22)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
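
A standalone, hedged sketch of the LVM teardown path used in the finally
block above, written against the avocado 'process' API that the other
examples on this page import; the lv_utils import path and the helper name
are assumptions based on how these tests usually obtain them.

from avocado.utils import process
from virttest.staging import lv_utils

def cleanup_lvm(vg_name):
    # Find the physical volume backing the test VG, then tear both down
    cmd = "pvs | grep %s | awk '{print $1}'" % vg_name
    pv_name = process.system_output(cmd, shell=True)
    lv_utils.vg_remove(vg_name)
    process.run("pvremove %s" % pv_name, shell=True)
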
Example no. 15
0
def run_virsh_pool_create(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump an existing pool to create the pool.
    3) Negatively create a pool (invalid option, no file, invalid file,
       duplicate pool name, and create a pool in readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameter
    # Back up the existing pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the existing pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        logging.debug("Create pool from file:\n %s",
                      open(pool_xml, 'r').read())
    try:
        cmd_result = virsh.pool_create(pool_xml,
                                       option,
                                       ignore_status=True,
                                       debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        if undefine_exist_pool and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
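
A brief sketch of the readonly negative case exercised above: pool-create
over a read-only connection (virsh -r) must fail. The file path is a
placeholder; the virsh wrapper and its readonly flag are the same ones this
example passes.

from virttest import virsh

res = virsh.pool_create("pool.xml", "", ignore_status=True, debug=True,
                        readonly=True)
assert res.exit_status != 0, "read-only connections must not create pools"
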
Example no. 16
0
def run(test, params, env):
    """
    Test the virsh pool commands with ACL: initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Did not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it does not." % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
        # disk/fs pool: the prepare step already labels the disk and creates a
        #               filesystem on it, so '--overwrite' is necessary
        # logical pool: building will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
        if build_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_build(pool_name, option,
                                      ignore_status=True)
            utlv.check_exit_status(result)

        # For iSCSI pools, we need to discover targets before starting the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected."
                                     % pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(
                False, restore_selinux=cleanup_env[3])
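
A small sketch of the version gate these ACL examples start with:
libvirt_version.version_compare(major, minor, update) is true when the
installed libvirt is at least that version (as the usage in the examples
above implies), so older hosts skip the polkit cases instead of failing them.

from virttest import libvirt_version

if not libvirt_version.version_compare(1, 1, 1):
    print("skip: API ACL tests need libvirt >= 1.1.1")
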
Example no. 17
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an oversized vol in the pool (expect failure), then check these values
    (21) Undefine the pool; this should fail as the pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    iscsi_initiator = params.get("iscsi_initiator", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn",
                                                  "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if pool_type == "iscsi-direct":
            test.cancel("iSCSI-direct pool is not supported in current"
                        "libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            test.fail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            test.fail("Not find volume '%s' in pool '%s'." %
                      (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)
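    # Note: the deviation above is one-sided; when 'actual' exceeds
    # 'expected' it goes negative and the check always passes. For example,
    # is_in_range(9.95, 10.0, 1) gives a 0.50% deviation (True), while
    # is_in_range(9.80, 10.0, 1) gives 2.00% (False).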

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # Converting bytes to GiB can introduce rounding deviation; it
            # should not exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failure (for fs-like pools, the
    # newly added disk may be held by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target,
        'iscsi_initiator': iscsi_initiator
    }
    params.update(kwargs)

    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(**params)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only apply to fs/disk/logical type pools
            # disk/fs pool: the prepare step has already labeled the disk and
            #               created a filesystem, so '--overwrite' is necessary
            # logical pool: pool build will fail if the VG already exists, BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name,
                                          build_option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name,
                                      debug=True,
                                      ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name,
                                          "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory based storage pool is thus pretty much always
            # active, and so is the SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over size vol in pool(expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For the NFS type pool there's a volume allocation bug (BZ#1077068),
            # and the glusterfs pool doesn't support volume creation, so skip them
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation,
                                             "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                result = virsh.pool_delete(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                option = "--inactive --type %s" % pool_type
                check_pool_list(pool_name, option)
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists." %
                              pool_target)
                # A deleted pool's source can no longer be started
                result = virsh.pool_start(pool_name,
                                          ignore_status=True)
                utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(**params)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
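
Both list helpers in this example scrape virsh's table output with a regex
and then substring-match the pool or volume name against the first column.
A standalone sketch of that parsing step, runnable without libvirt (the
sample 'virsh pool-list --all' output is hypothetical):

import re

sample_output = """Name          State      Autostart
-------------------------------------
default       active     yes
temp_pool_1   inactive   no"""

def pool_in_list(pool_name, output):
    """Return True if pool_name appears in the first column of a row."""
    rows = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", output.strip())
    return any(pool_name in row[0] for row in rows)

print(pool_in_list("temp_pool_1", sample_output))   # True
print(pool_in_list("no_such_pool", sample_output))  # False

Note that the substring match ('pool_name in row[0]') means a pool whose
name is a prefix of another pool's name can match the wrong row; an exact
comparison (row[0] == pool_name) would be stricter.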
Example no. 18
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use the xml file from test parameters to create the pool.
    2) Dump an existing pool and use its XML to create the pool.
    3) Negatively create pool (invalid option, no file, invalid file,
       duplicate pool name, and create pool under readonly mode).
    """

    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            raise error.TestFail("Require pool: %s exist", exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    created_pool_path = False
    if "--file" in option:
        # As of commit id 'c5f6c685' in virt-install (v1.0.0), virt-install
        # will attempt to create a storage pool for ease of management of
        # volumes. Thus if the test was started with --install or by chance
        # an 'images' pool exists using get_data_dir(), then the attempt
        # to define/create this pool will fail since two pools cannot use
        # the same path.  Therefore, rather than use the data dir for our
        # test, let's just use 'tmp' dir and create an images pool there.
        pool_path = os.path.join(data_dir.get_tmp_dir(), 'images')
        if not os.path.exists(pool_path):
            os.mkdir(pool_path)
            created_pool_path = True
        dir_xml = """
<pool type='dir'>
  <name>%s</name>
  <target>
    <path>%s</path>
  </target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        xml_object = open(pool_xml, 'w')
        xml_object.write(dir_xml)
        xml_object.close()

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            raise error.TestFail("Delete pool: %s fail", exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        with open(pool_xml, 'r') as xml_file:
            logging.debug("Create pool from file:\n %s", xml_file.read())
    try:
        cmd_result = virsh.pool_create(pool_xml,
                                       option,
                                       ignore_status=True,
                                       debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
           (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)

        # restore the undefined default pool
        if undefine_exist_pool:  # and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
        # If we created the pool_path, then remove it
        if created_pool_path:
            os.rmdir(pool_path)
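
Example 18 writes its minimal 'dir' pool document by string formatting. An
equivalent sketch that builds the same XML shape with the standard library
(the pool name and path here are illustrative):

import xml.etree.ElementTree as ET

def make_dir_pool_xml(name, path):
    """Build a minimal <pool type='dir'> document like the one above."""
    pool = ET.Element('pool', {'type': 'dir'})
    ET.SubElement(pool, 'name').text = name
    target = ET.SubElement(pool, 'target')
    ET.SubElement(target, 'path').text = path
    return ET.tostring(pool, encoding='unicode')

print(make_dir_pool_xml('images', '/tmp/images'))
# <pool type="dir"><name>images</name><target><path>/tmp/images</path></target></pool>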
Example no. 19
def run(test, params, env):
    """
    Test the virsh pool commands with ACL; initiate a pool, then do the
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the env.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    cleanup_env = [False, False, False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }
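    # acl_dargs routes a virsh call through the given connection URI as the
    # unprivileged user, so the polkit ACL rules are what gets exercised;
    # calls made without these dargs run with the test's own privileges.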

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
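
The recurring pattern in Example 19 is: run a step (with or without the ACL
dargs), check its status against the expected outcome, and on an expected
failure redo the step privileged so later steps still see a sane state. A
minimal sketch of that control flow with stubbed results (no virsh calls;
every name here is illustrative):

class Result(object):
    """Stand-in for a virsh command result object."""
    def __init__(self, exit_status, stderr=""):
        self.exit_status = exit_status
        self.stderr = stderr

def run_step(op, expect_error, redo):
    """Run op, assert the expected outcome, redo on an expected failure."""
    result = op()
    if not expect_error and result.exit_status != 0:
        raise AssertionError(result.stderr)
    if expect_error:
        if result.exit_status == 0:
            raise AssertionError("Expect fail, but run successfully.")
        # Redo under the negative case to keep the case running.
        if redo().exit_status != 0:
            raise AssertionError("Redo step failed.")

# An ACL-restricted attempt fails as expected; the privileged redo passes.
run_step(op=lambda: Result(1, "access denied"),
         expect_error=True,
         redo=lambda: Result(0))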
Example no. 20
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **pool_kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file: %s", f.read())
try:
    # define/create/start the pool
    if (pre_def_pool == "yes") and (define_pool == "yes"):
        pool_define_status = virsh.pool_define(pool_xml_f,
                                               ignore_status=True,
                                               debug=True)
        utlv.check_exit_status(pool_define_status)
    if define_pool_as == "yes":
        pool_define_as_status = virsh.pool_define_as(
            pool_name, pool_type,
            pool_target, pool_extra_args,
            ignore_status=True, debug=True
            )
        utlv.check_exit_status(pool_define_as_status)
    if pool_create_as == "yes":
        if pool_type != "scsi":
            raise exceptions.TestSkipError("pool-create-as only needs to "
                                           "be covered by scsi pool for "
                                           "NPIV test.")
        cmd = "virsh pool-create-as %s %s \
Example no. 21
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s' % vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                session.close()
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet,"
                                               " the error message is: %s"
                                               % dev_attach_status.stderr)
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            # list.append() returns None, so append first and assign after
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
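
Examples 21 and 23 locate the newly attached guest disk by taking the
symmetric difference of the disk lists captured before and after
attach-device. A tiny sketch of that step with hypothetical device names:

bf_disks = {"/dev/vda", "/dev/vdb"}              # before attach
af_disks = {"/dev/vda", "/dev/vdb", "/dev/vdc"}  # after attach

new_disks = set(bf_disks) ^ set(af_disks)  # symmetric difference
mount_disk = "".join(new_disks)
assert mount_disk == "/dev/vdc"

Caveat: "".join() silently concatenates paths when more than one disk
changed between the snapshots; checking len(new_disks) == 1 first would be
stricter.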
Example no. 22
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f,
                                       ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info("Using the first LUN unit %s to attach to a guest",
                     test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
Example no. 23
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test needs a wwpn and
    wwnn of a vHBA on the host which is zoned and mapped to a SAN controller.

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # if no online hba cards on host test fails
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
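# Note: mount_and_dd() used above is defined elsewhere in the same test module
# and is not shown in this snippet. A minimal sketch of what such a helper
# typically does (the helper name matches the call above; the mount point and
# dd sizes below are assumptions, not the original):


def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    """
    Mount the given disk inside the guest and write a test file with dd.
    Return True only if both the mount and the write succeed.
    """
    status, output = session.cmd_status_output(
        "mount %s %s" % (mount_disk, mount_point))
    if status:
        logging.debug("Mount failed: %s", output)
        return False
    # Write 100 MiB of zeros to prove the attached LUN is actually writable
    status, output = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=1M count=100" % mount_point)
    logging.debug("dd output: %s", output)
    return status == 0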
Example no. 24
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools).
        For 'fs' type pools, cover the --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-sized vol in the pool (expect failure), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
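        # Each matched triple is one row of the `virsh pool-list` table,
        # e.g. "default   active   yes" -> ('default', 'active', 'yes')
        # (illustrative output; the exact rows depend on the host's pools)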
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in the pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in the pool list.", pool_name)
        if expect_error and found:
            test.fail("Pool '%s' exists unexpectedly." % pool_name)
        if not expect_error and not found:
            test.fail("Expected pool '%s' does not exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
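        # Each matched pair is one (name, path) row of the `virsh vol-list`
        # table, e.g. "temp_vol_1   /tmp/pool/temp_vol_1" (illustrative)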
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Found volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            test.fail(
                "Volume '%s' not found in pool '%s'." %
                (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        """
        Return True if 'actual' deviates from 'expected' by no more than
        'error_percent' percent.
        """
        # abs() so that values above 'expected' are bounded as well
        deviation = abs(100 - (100 * (float(actual) / float(expected))))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)
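    # Worked example (illustrative numbers, not from the test): with
    # expected=10.0 and error_percent=1, actual=9.95 gives a 0.50% deviation
    # and passes, while actual=9.80 gives 2.00% and fails.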

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict with the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # As from bytes to GiB, could cause deviation, and it should not
            # exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." %
                          (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." %
                          (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools the
    # newly added disk may be claimed by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
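    # These kwargs feed pre_pool(), which creates the emulated backing
    # image/device for the requested pool type and sizes it accordingly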
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            # p_xml is only set in the new_pool_name branch above, so re-dump
            # the pool XML here when that branch was skipped
            if not new_pool_name:
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after updating the host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
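            # The pool's <source> element should now contain, for example:
            #   <initiator>
            #     <iqn name='iqn.2018-07.com.virttest:pool.target'/>
            #   </initiator>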
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only apply to fs/disk/logical pools
            # disk/fs pool: the prepare step already labels the disk and
            #               creates a filesystem, so '--overwrite' is necessary
            # logical pool: build fails if the VG already exists, BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name, build_option, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name, debug=True, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name, "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it checks whether any of the storage
            # pools have been activated externally; if so, it marks the pool
            # as active. This is independent of autostart, so a directory
            # based storage pool is pretty much always active, and so is the
            # SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
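                # Create an empty file under the target path; after the
                # refresh it should show up as a new volume in vol-list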
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over size vol in pool(expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For NFS type pools there is a bug (BZ#1077068) with volume
            # allocation, and glusterfs pools do not support creating
            # volumes, so skip them
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation, "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                result = virsh.pool_delete(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                option = "--inactive --type %s" % pool_type
                check_pool_list(pool_name, option)
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists." %
                              pool_target)
                # Starting a deleted pool is expected to fail
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)