def file_remove(params, filename_path):
    """
    Remove the image
    :param params: Dictionary containing the test parameters.
    :param filename_path: path to file
    """
    if params.get("enable_ceph") == "yes":
        image_name = params.get("image_name")
        image_format = params.get("image_format", "qcow2")
        ceph_monitor = params["ceph_monitor"]
        rbd_pool_name = params["rbd_pool_name"]
        rbd_image_name = "%s.%s" % (image_name.split("/")[-1], image_format)
        return ceph.rbd_image_rm(ceph_monitor, rbd_pool_name, rbd_image_name)

    if params.get("gluster_brick"):
        # TODO: Add implementation for gluster_brick
        return

    if params.get('storage_type') in ('iscsi', 'lvm'):
        # TODO: Add implementation for iscsi/lvm
        return

    if os.path.exists(filename_path):
        os.unlink(filename_path)
        return
Example #2
def file_remove(params, filename_path):
    """
    Remove the image
    :param params: Dictionary containing the test parameters.
    :param filename_path: path to file
    """
    if params.get("enable_ceph") == "yes":
        image_name = params.get("image_name")
        image_format = params.get("image_format", "qcow2")
        ceph_monitor = params.get("ceph_monitor")
        rbd_pool_name = params["rbd_pool_name"]
        rbd_namespace_name = params.get("rbd_namespace_name")
        rbd_image_name = "%s.%s" % (image_name.split("/")[-1], image_format)
        ceph_conf = params.get("ceph_conf")
        keyring_conf = params.get("image_ceph_keyring_conf")
        return ceph.rbd_image_rm(ceph_monitor, rbd_pool_name, rbd_image_name,
                                 ceph_conf, keyring_conf, rbd_namespace_name)

    if params.get("gluster_brick"):
        # TODO: Add implementation for gluster_brick
        return

    if params.get('storage_type') in ('iscsi', 'lvm', 'iscsi-direct'):
        # TODO: Add implementation for iscsi/lvm/iscsi-direct
        return

    # skip removing raw device
    if params.get('image_raw_device') == 'yes':
        return

    if os.path.exists(filename_path):
        os.unlink(filename_path)
        return
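
Usage sketch for the ceph branch of this variant (hypothetical values; `ceph.rbd_image_rm` is the avocado-vt helper whose extended signature the code above relies on):

params = {"enable_ceph": "yes",
          "image_name": "images/test",      # maps to rbd image "test.qcow2"
          "ceph_monitor": "mon.example.com:6789",
          "rbd_pool_name": "rbd",
          "ceph_conf": "/etc/ceph/ceph.conf",
          "image_ceph_keyring_conf": "/etc/ceph/ceph.client.admin.keyring"}
file_remove(params, "/unused/path")         # filename_path is ignored here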
Example #3
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: number of snapshots to take.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as the external snapshot file format;
                # update it here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                # Make sure 'hosts' is defined even if no branch below sets it
                hosts = None
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use a local file as the external snapshot target for
                        # block and iscsi network disk types.
                        # A block device is treated as raw format by default,
                        # which is unsuitable as an external disk snapshot
                        # target; the workaround is to run qemu-img again on
                        # the target. Also, external active snapshots are not
                        # supported on 'network' disks using the 'iscsi'
                        # protocol.
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: source path of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")
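    # The chain built above (derived from backing_file_dict; paths are
    # relative to the directory of the original image):
    #   b/b.img -> ../<basename>    (the original image)
    #   c/c.img -> ../b/b.img
    #   d/d.img -> ../c/c.img       (returned as the new top active file)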

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # After active blockcommit, the source image must refer back to the
        # original one
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path: %s doesn't include the origin "
                      "image: %s" % (first_disk_source, disk_src_file))
        # Validate that the source image has no backing files after active
        # blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
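    # For reference, the `qemu-img info --backing-chain` output parsed above
    # looks roughly like this (abridged; exact fields vary by qemu version):
    #   image: /path/to/top.img
    #   file format: qcow2
    #   backing file: /path/to/base.img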

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption; image locking was added
    # in qemu 2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_data_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # An empty value lets the cleanup code tell whether a ceph config file
    # was created during the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
                detected_distro = distro.detect()
                rbd_img_prefix = '_'.join(['rbd', detected_distro.name,
                                           detected_distro.version,
                                           detected_distro.release,
                                           detected_distro.arch])
                params.update(
                   {"disk_source_name": os.path.join(
                      pool_name,
                      rbd_img_prefix + '.img')})
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(
                        mon_host, *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common package.')
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt 1.2.4 and newer, or when the source protocol is
            # gluster, the base image is given as disk target plus chain
            # index; the index depends on the base option (shallow, base or
            # top).
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with the --keep-relative flag, and reset
            # base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                if count >= 3:
                    if libvirt_version.version_compare(6, 0, 0):
                        break
                    # If there are three or more block pull operations, reset
                    # base_index to 1. This only affects testing on
                    # libvirt < 6.0.0
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = "  --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

                if libvirt_version.version_compare(6, 0, 0):
                    base_index += 1
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return
        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If the pull job was aborted by the timeout, the exit status differs
        # between RHEL6 (0) and RHEL7 (1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                elif "base" or "shallow" in base_option:
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)

                    chain_lst = snap_src_lst[:base_index][::-1]
                    if not libvirt_version.version_compare(6, 0, 0):
                        chain_lst = snap_src_lst[::-1][base_index:]
                    chain_lst.insert(0, snap_src_lst[-1])

                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            logging.info("clean gluster env")
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
Example #4
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True
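    # For orientation: the backup XML assembled below looks roughly like the
    # following (a sketch based on the libvirt backup API; details vary):
    #   <domainbackup mode="push">
    #     <disks>
    #       <disk name="vdb" backup="yes" type="file">
    #         <target file="/path/to/target_file_0"/>
    #         <driver type="qcow2"/>
    #       </disk>
    #     </disks>
    #   </domainbackup>
    # plus a <domaincheckpoint> document that names a bitmap per disk.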

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_data_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "ceph":
            ceph_mon_host = params.get("ceph_mon_host",
                                       "EXAMPLE_MON_HOST_AUTHX")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT")
            ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL")
            ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE")
            ceph_disk_name = ceph_pool_name + "/" + ceph_file_name
            ceph_client_name = params.get("ceph_client_name",
                                          "EXAMPLE_CLIENT_NAME")
            ceph_client_key = params.get("ceph_client_key",
                                         "EXAMPLE_CLIENT_KEY")
            ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER")
            ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            disk_params_auth = {}
            # An empty value lets the cleanup code tell whether a ceph config
            # file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_host)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_host, key_file)
            else:
                # Hypothetical fallback: without auth, a plain rbd URI is
                # assumed to be sufficient
                disk_path = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                    ceph_mon_host)
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_host,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {"attrs": {}}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["attrs"]["file"] = target_file_path
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["attrs"]["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    logging.debug("target params: %s", target_params)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove local backup file
        if "target_file_path" in locals():
            if os.path.exists(target_file_path):
                os.remove(target_file_path)

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
Example #5
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """
    def make_disk_snapshot(postfix_n,
                           snapshot_take,
                           is_check_snapshot_tree=False,
                           is_create_image_file_in_vm=False):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix option
        :param snapshot_take: snapshots taken.
        :param is_create_image_file_in_vm: create image file in VM.
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            if is_check_snapshot_tree:
                options = options.replace("--no-metadata", "")
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            if is_create_image_file_in_vm:
                create_file_cmd = "dd if=/dev/urandom of=/mnt/snapshot_%s.img bs=1M count=2" % count
                session.cmd_status_output(create_file_cmd)
                created_image_files_in_vm.append("snapshot_%s.img" % count)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

        def check_snapshot_tree():
            """
            Check whether predefined snapshot names are equals to
            snapshot names by virsh snapshot-list --tree
            """
            predefined_snapshot_name_list = []
            for count in range(1, snapshot_take):
                predefined_snapshot_name_list.append("%s_%s" %
                                                     (postfix_n, count))
            snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
            result_output = process.run(snapshot_list_cmd,
                                        ignore_status=True,
                                        shell=True).stdout_text
            virsh_snapshot_name_list = []
            for line in result_output.rsplit("\n"):
                strip_line = line.strip()
                if strip_line and "|" not in strip_line:
                    virsh_snapshot_name_list.append(strip_line)
            # Compare two lists in their order and values, all need to be same.
            compare_list = [
                out_p for out_p, out_v in zip(predefined_snapshot_name_list,
                                              virsh_snapshot_name_list)
                if out_p not in out_v
            ]
            if compare_list:
                test.fail("snapshot tree not correctly returned.")

        # If check_snapshot_tree is True, check snapshot tree output.
        if is_check_snapshot_tree:
            check_snapshot_tree()

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: source path of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files(pre_set_root_dir=None):
        """
        Create backing chain files of relative path.
        :param pre_set_root_dir: preset root dir
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        if pre_set_root_dir is None:
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        if pre_set_root_dir:
            backing_file_dict["b"] = "%s" % first_disk_source
            backing_file_dict["c"] = "%s/b/b.img" % root_dir
            backing_file_dict["d"] = "%s/c/c.img" % root_dir
        disk_format = params.get("disk_format", "qcow2")
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = (
                "cd %s && qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s.img"
                % (backing_file_path, "qcow2", value, disk_format, key))
            ret = process.run(cmd, shell=True)
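            # After the first layer, every overlay declares a qcow2 backing
            # file format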
            disk_format = "qcow2"
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # After active blockcommit, the source image must refer back to the
        # original one
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail(
                "The disk image path: %s doesn't include the origin image: %s" %
                (first_disk_source, disk_src_file))
        # Validate that the source image has no backing files after active
        # blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    def create_reuse_external_snapshots(pre_set_root_dir=None):
        """
        Create reuse external snapshots
        :param pre_set_root_dir: preset root directory
        :return: absolute path of base file
        """
        if pre_set_root_dir is None:
            first_disk_source = get_first_disk_source()
            basename = os.path.basename(first_disk_source)
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        meta_options = " --reuse-external --disk-only --no-metadata"
        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "b.img"
        backing_file_dict["c"] = "c.img"
        backing_file_dict["d"] = "d.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            external_snap_shot = "%s/%s" % (backing_file_path, value)
            snapshot_external_disks.append(external_snap_shot)
            options = "%s --diskspec %s,file=%s" % (meta_options, disk_target,
                                                    external_snap_shot)
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  options,
                                                  ignore_status=False,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result)
        logging.debug('reuse external snapshots:%s' % snapshot_external_disks)
        return root_dir

    def check_file_in_vm():
        """
        Check whether certain image files exists in VM internal.
        """
        for img_file in created_image_files_in_vm:
            status, output = session.cmd_status_output("ls -l /mnt/%s" %
                                                       img_file)
            logging.debug(output)
            if status:
                test.fail(
                    "blockcommit from top to base failed; listing the image "
                    "file in the VM returned: %s" % output)

    def do_blockcommit_pivot_repeatedly():
        """
        Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735
        """
        # Make external snapshot,pivot and delete snapshot file repeatedly.
        tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
        block_target = 'vda'
        for count in range(0, 5):
            options = "%s " % tmp_snapshot_name
            options += "--disk-only --atomic"
            disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
            options += "  --diskspec  %s,snapshot=external,file=%s" % (
                block_target, disk_external)
            virsh.snapshot_create_as(vm_name,
                                     options,
                                     ignore_status=False,
                                     debug=True)
            virsh.blockcommit(vm_name,
                              block_target,
                              " --active --pivot ",
                              ignore_status=False,
                              debug=True)
            virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
            libvirt.delete_local_disk('file', disk_external)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    cmd_timeout = params.get("cmd_timeout", "1")
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}
    check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no")
    bandwidth = params.get("blockcommit_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot",
                                                  "no")
    restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit",
                                                   "no")
    check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm",
                                                 "no")
    pre_set_root_dir = None
    blk_source_folder = None
    convert_qcow2_image_to_raw = "yes" == params.get(
        "convert_qcow2_image_to_raw", "no")
    repeatedly_do_blockcommit_pivot = "yes" == params.get(
        "repeatedly_do_blockcommit_pivot", "no")
    from_top_without_active_option = "yes" == params.get(
        "from_top_without_active_option", "no")
    top_to_middle_keep_overlay = "yes" == params.get(
        "top_to_middle_keep_overlay", "no")
    block_disk_type_based_on_file_backing_file = "yes" == params.get(
        "block_disk_type_based_on_file_backing_file", "no")
    block_disk_type_based_on_gluster_backing_file = "yes" == params.get(
        "block_disk_type_based_on_gluster_backing_file", "no")

    # Check whether qemu-img needs the -U suboption; image locking was added
    # in qemu 2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get(
        "backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # This restriction comes with the new blockdev feature
    if (libvirt_version.version_compare(6, 0, 0)
            and params.get("transport", "") == "rdma"):
        test.cancel("If blockdev is enabled, the transport protocol 'rdma' is "
                    "not yet supported.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    cmd_session = None
    # An empty value lets the cleanup code tell whether a ceph config file
    # was created during the test
    ceph_cfg = ''
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'block' and reuse_external_snapshot:
            first_disk = vm.get_first_disk_devices()
            pre_set_root_dir = os.path.dirname(first_disk['source'])

        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")

                params.update({
                    "disk_source_name":
                    os.path.join(
                        pool_name,
                        'rbd_' + utils_misc.generate_random_string(4) + '.img')
                })
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(
                        mon_host,
                        *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common to clean image.')
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({
                    'disk_source_name': replace_disk_image,
                    'disk_type': 'file',
                    'disk_src_protocol': 'file'
                })
                vm.start()
            if convert_qcow2_image_to_raw:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                blk_source_image_after_converted = "%s/converted_%s" % (
                    blk_source_folder, blk_source_image)
                # Convert the image from qcow2 to raw
                convert_disk_cmd = ("qemu-img convert"
                                    " -O %s %s %s" %
                                    (disk_format, first_src_file,
                                     blk_source_image_after_converted))
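                # The assembled command looks like (paths are illustrative):
                #   qemu-img convert -O raw /path/image.qcow2 /path/converted_image.qcow2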
                process.run(convert_disk_cmd, ignore_status=False, shell=True)
                params.update({
                    'disk_source_name': blk_source_image_after_converted,
                    'disk_type': 'file',
                    'disk_src_protocol': 'file'
                })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        if repeatedly_do_blockcommit_pivot:
            do_blockcommit_pivot_repeatedly()

        # Create a block-type disk backed by a file (or gluster) backing file
        if block_disk_type_based_on_file_backing_file or block_disk_type_based_on_gluster_backing_file:
            if not vm.is_alive():
                vm.start()
            first_src_file = get_first_disk_source()
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
            iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True)
            block_type_backstore = iscsi_target
            if block_disk_type_based_on_file_backing_file:
                first_src_file = get_first_disk_source()
            if block_disk_type_based_on_gluster_backing_file:
                first_src_file = "gluster://%s/%s/gluster.qcow2" % (
                    params.get("gluster_server_ip"), params.get("vol_name"))
            backing_file_create_cmd = (
                "qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s" %
                ("qcow2", first_src_file, "qcow2", block_type_backstore))
            process.run(backing_file_create_cmd,
                        ignore_status=False,
                        shell=True)
            meta_options = " --reuse-external --disk-only --no-metadata"
            options = "%s --diskspec %s,file=%s" % (meta_options, 'vda',
                                                    block_type_backstore)
            virsh.snapshot_create_as(vm_name,
                                     options,
                                     ignore_status=False,
                                     debug=True)

        # The first disk is supposed to contain the OS;
        # we will perform the blockcommit operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []
        created_image_files_in_vm = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        if reuse_external_snapshot:
            make_relative_path_backing_files(pre_set_root_dir)
            blk_source_folder = create_reuse_external_snapshots(
                pre_set_root_dir)
        else:
            make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree,
                               check_image_file_in_vm)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            if libvirt_version.version_compare(6, 0, 0):
                bs_source = {'file': blk_source}
                bs_dict = {
                    "type": params.get("disk_type", "file"),
                    "format": {
                        'type': params.get("disk_format", "qcow2")
                    }
                }
                new_bs = disk_xml.new_backingstore(**bs_dict)
                new_bs["source"] = disk_xml.backingstore.new_source(
                    **bs_source)
                disk_xml.backingstore = new_bs
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
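        # Build a human-readable chain string, newest image first, e.g.
        # (illustrative names): image.snap3 -> image.snap2 -> image.snap1 -> image.qcow2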
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # Check that the snapshot disk xml backingStore is as expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" % blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout %s" % cmd_timeout

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source
        if bandwidth:
            blockcommit_options += " --bandwidth %s" % bandwidth
        if bandwidth_byte:
            blockcommit_options += " --bytes"
        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            if reuse_external_snapshot:
                index = len(snapshot_external_disks) - 2
                top_image = snapshot_external_disks[index]
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if from_top_without_active_option:
            blockcommit_options = blockcommit_options.replace("--active", "")

        if top_to_middle_keep_overlay:
            blockcommit_options = blockcommit_options.replace("--active", "")
            blockcommit_options = blockcommit_options.replace("--pivot", "")
            blockcommit_options += " --keep-overlay"

        if restart_vm_before_commit:
            top = 2
            base = len(snapshot_external_disks)
            blockcommit_options = (
                "--top %s[%d] --base %s[%d] --verbose --wait --keep-relative" %
                (disk_target, top, disk_target, base))
            vm.destroy(gracefully=True)
            vm.start()

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # an inactive commit following an active commit will fail; see bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_xml = ''
                disk_xmls = vmxml.get_devices(device_type="disk")
                for disk in disk_xmls:
                    if disk.get('device_tag') == 'disk':
                        disk_xml = disk
                        break

                top_index = 1
                try:
                    top_index = disk_xml.backingstore.index
                except AttributeError:
                    pass
                else:
                    top_index = int(top_index)

                block_commit_index = snapshot_take - 1
                expect_backing_file = True
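            # "vda[N]" addresses the N-th element of the disk's backing chain
            # by the index attribute reported in the domain XML, e.g.
            # "--top vda[1] --base vda[2]" commits the first overlay into the
            # image below it.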
            for count in range(1, block_commit_index):
                # Do block commit with --wait if top_inactive
                if top_inactive:
                    blockcommit_options = ("  --wait --verbose --top vda[%d] "
                                           "--base vda[%d] --keep-relative" %
                                           (top_index, top_index + 1))
                    if not libvirt_version.version_compare(6, 0, 0):
                        top_index = 1
                    else:
                        top_index += 1
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        if reuse_external_snapshot and not top_inactive:
            block_commit_index = len(snapshot_external_disks) - 1
            for index in range(block_commit_index):
                # Do block commit with --shallow --wait
                external_blockcommit_options = (
                    "  --shallow --wait --verbose --top %s " %
                    (snapshot_external_disks[index]))

                res = virsh.blockcommit(vm_name, blk_target,
                                        external_blockcommit_options,
                                        **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Do blockcommit with top active
            result = virsh.blockcommit(vm_name, blk_target,
                                       blockcommit_options, **virsh_dargs)
            # Check status_error
            libvirt.check_exit_status(result, status_error)
            return

        # Start one thread to check the bandwidth in output
        if bandwidth and bandwidth_byte:
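            # With --bytes, virsh reports bandwidth in bytes/s rather than
            # MiB/s, so the expected value gains a 'B' suffix (e.g. "1000B").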
            bandwidth += 'B'
            pool = ThreadPool(processes=1)
            pool.apply_async(
                check_bandwidth_thread,
                (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test))

        # Run test case
        # Active commit is not supported on rbd-based disks; see bug 1200726
        result = virsh.blockcommit(vm_name, blk_target, blockcommit_options,
                                   **virsh_dargs)
        # Check status_error
        libvirt.check_exit_status(result, status_error)

        # Skip the backing chain checks, as per the test case description
        if restart_vm_before_commit:
            return

        if check_image_file_in_vm:
            check_file_in_vm()

        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug(
                                    "after active block commit job "
                                    "ready for pivot, the target disk"
                                    " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name,
                                                        blk_target,
                                                        '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for the pivot to complete after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s" %
                              disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Remove the ceph config file if it was created
        if ceph_cfg:
            os.remove(ceph_cfg)

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path or reuse_external_snapshot:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            if blk_source_folder:
                process.run("cd %s && rm -rf b c d" % blk_source_folder,
                            shell=True)
        if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
        # Recover images' xattrs if there are any left
        dirty_images = get_images_with_xattr(vm)
        if dirty_images:
            clean_images_with_xattr(dirty_images)
            test.fail("VM's image(s) having xattr left")
Example #8
def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True):
    """
    Setup vm ceph disk with given parameters

    :param vm: the vm object
    :param params: dict, dict include setup vm disk xml configurations
    :param is_setup: boolean, whether to set up (True) or clean up (False)
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    logging.debug("original xml is: %s", vmxml)

    # Device related configurations
    device_format = params.get("virt_disk_device_format", "raw")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    hotplug = "yes" == params.get("virt_disk_device_hotplug", "no")
    keep_raw_image_as = "yes" == params.get("keep_raw_image_as", "no")

    # Ceph related configurations
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    auth_sec_usage_type = params.get("ceph_auth_sec_usage_type", "ceph")
    storage_size = params.get("storage_size", "1G")
    img_file = params.get("ceph_image_file")
    attach_option = params.get("virt_device_attach_option", "--live")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = ""
    is_local_img_file = img_file is None
    rbd_key_file = None

    # Prepare an empty string so cleanup can tell whether a ceph config
    # file was created
    ceph_cfg = ""
    disk_auth_dict = None
    auth_sec_uuid = None
    names = ceph_disk_name.split('/')
    pool_name = names[0]
    image_name = names[1]
    if not utils_package.package_install(["ceph-common"]):
        raise exceptions.TestError("Failed to install ceph-common")

    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(ceph_mon_ip)
    # If auth is enabled, prepare a local file to save the key
    if ceph_client_name and ceph_client_key:
        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" %
                    (ceph_client_name, ceph_client_key))
        key_opt = "--keyring %s" % key_file
        rbd_key_file = key_file
    if is_setup:
        # If auth is enabled, prepare the disk auth
        if ceph_client_name and ceph_client_key:
            auth_sec_uuid = _create_secret(auth_sec_usage_type, ceph_auth_key)
            disk_auth_dict = {"auth_user": ceph_auth_user,
                              "secret_type": auth_sec_usage_type,
                              "secret_uuid": auth_sec_uuid}
        # Clean up the image file if it exists
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)

        # Create the necessary image file if it doesn't exist
        _create_image(device_format, img_file, vm.name, storage_size,
                      ceph_disk_name, ceph_mon_ip, key_opt,
                      ceph_auth_user, ceph_auth_key)

        # Disk related config
        disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": ceph_disk_name},
                         "hosts":  [{"name": ceph_mon_ip,
                                     "port": ceph_host_port}]}
        # Create network disk
        disk_xml = libvirt_disk.create_primitive_disk_xml("network", device, device_target, device_bus,
                                                          device_format, disk_src_dict, disk_auth_dict)
        if not keep_raw_image_as:
            if hotplug:
                virsh.attach_device(vm.name, disk_xml.xml,
                                    flagstr=attach_option, ignore_status=False, debug=True)
            else:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
                vmxml.add_device(disk_xml)
                vmxml.sync()
    else:
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)
        # Remove ceph config and key file if created.
        for file_path in [ceph_cfg, key_file]:
            if os.path.exists(file_path):
                os.remove(file_path)
        if is_local_img_file and img_file and os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
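
# A minimal usage sketch for the helper above (parameter keys and values are
# illustrative; the real ones come from the avocado-vt test configuration):
#
#     params = {"ceph_mon_ip": "mon.example.com",
#               "ceph_disk_name": "rbd_pool/disk1.img",
#               "virt_disk_device_target": "vdb"}
#     create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
#     ...  # run the test
#     create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)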