Example #1
    def get_target_image(self):
        params = self.parser_test_args()
        target_image = storage.get_image_filename(params, self.data_dir)
        if params.get("image_type") == "nfs":
            image = nfs.Nfs(params)
            image.setup()
            utils_misc.wait_for(lambda: os.path.ismount(image.mount_dir),
                                timeout=30)
        elif params.get("image_type") == "iscsi":
            image = qemu_storage.Iscsidev(params, self.data_dir,
                                          params["target_image"])
            return image.setup()

        if (params["create_mode"] == "existing"
                and not os.path.exists(target_image)):
            image = qemu_storage.QemuImg(params, self.data_dir,
                                         params["target_image"])
            image.create(params)

        return target_image
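
A usage sketch of the helper above (the caller name is illustrative, not from the original test); the returned value is a file path for file-backed and NFS images and a block device path for iSCSI:

# Hypothetical caller; block_copy_test stands in for the test instance above.
block_copy_test.params["image_type"] = "nfs"     # or "iscsi"
target = block_copy_test.get_target_image()      # file path, or /dev/... for iSCSI
logging.info("copy destination resolved to %s", target)
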
Example #2
def run(test, params, env):
    """
    Test block mirroring functionality

    1) boot vm, then mirror $source_image to $target_image
    2) wait for the mirroring job to reach ready status
    3) pause vm once the job is ready
    4) reopen the $target_image file
    5) compare $source_image and $target_image
    6) resume vm
    7) boot vm from $target_image and check the vm is alive if necessary

    "qemu-img compare" is used to verify the disk is mirrored successfully.
    """
    tag = params.get("source_image", "image1")
    qemu_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(), tag)
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    try:
        source_image = mirror_test.get_image_file()
        target_image = mirror_test.get_target_image()
        mirror_test.start()
        mirror_test.action_when_steady()
        mirror_test.vm.pause()
        mirror_test.reopen()
        mirror_test.action_after_reopen()
        device_id = mirror_test.vm.get_block({"file": target_image})
        if device_id != mirror_test.device:
            raise error.TestError("Mirrored image not being used by guest")
        error.context("Compare fully mirrored images", logging.info)
        qemu_img.compare_images(source_image, target_image, force_share=True)
        mirror_test.vm.resume()
        if params.get("boot_target_image", "no") == "yes":
            mirror_test.vm.destroy()
            params = params.object_params(tag)
            if params.get("image_type") == "iscsi":
                params["image_raw_device"] = "yes"
            env_process.preprocess_vm(test, params, env, params["main_vm"])
            mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
        mirror_test.verify_alive()
    finally:
        mirror_test.clean()
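
compare_images() above shells out to the qemu-img binary; with force_share=True it roughly corresponds to the following manual check (a sketch, paths illustrative; qemu-img compare exits 0 only when the contents match):

from avocado.utils import process

cmd = "qemu-img compare -U %s %s" % (source_image, target_image)
result = process.run(cmd, ignore_status=True)
if result.exit_status != 0:
    raise AssertionError("images differ: %s" % result.stdout_text)
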
Example #3
    def clean(self):
        if self.copier_pid:
            try:
                if self.vm_guest.is_alive():
                    session = self.vm_guest.wait_for_login(
                        timeout=login_timeout)
                    session.cmd("kill -9 %s" % self.copier_pid)
            except Exception:
                logging.warning("It was impossible to stop copier. Something "
                                "probably happened with GUEST or NFS server.")

        if params.get("kill_vm") == "yes":
            if self.vm_guest.is_alive():
                self.vm_guest.destroy()
                utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30, 2,
                                    2, "Waiting for the guest to die.")
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path, None)
            qemu_img.check_image(self.image2_vm_guest_params, mount_path)

        self.clean_test()
Example #4
def run(test, params, env):
    """
    1) Synchronize disk and then do continuous backup

    "qemu-img compare" is used to verify disk is mirrored successfully.
    """
    tag = params.get("source_images", "image1")
    qemu_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(), tag)
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    tmp_dir = params.get("tmp_dir", "c:\\")
    clean_cmd = params.get("clean_cmd", "del /f /s /q tmp*.file")
    dd_cmd = "dd if=/dev/zero bs=1024 count=1024 of=tmp%s.file"
    dd_cmd = params.get("dd_cmd", dd_cmd)
    try:
        source_image = mirror_test.get_image_file()
        target_image = mirror_test.get_target_image()
        error.context("start mirror block device", logging.info)
        mirror_test.start()
        error.context("Wait mirror job in steady status", logging.info)
        mirror_test.wait_for_steady()
        error.context("Testing continuous backup", logging.info)
        session = mirror_test.get_session()
        error.context("Continuous create file in guest", logging.info)
        session.cmd("cd %s" % tmp_dir)
        for fn in range(0, 128):
            session.cmd(dd_cmd % fn)
        error.context("pause vm and sync host cache", logging.info)
        time.sleep(3)
        mirror_test.vm.pause()
        utils.system("sync")
        time.sleep(3)
        error.context("Compare original and backup images", logging.info)
        qemu_img.compare_images(source_image, target_image)
        mirror_test.vm.resume()
        session = mirror_test.get_session()
        session.cmd("cd %s" % tmp_dir)
        session.cmd(clean_cmd)
        mirror_test.vm.destroy()
    finally:
        mirror_test.clean()
Example #5
def run(test, params, env):
    """
    Run a gluster test.
    steps:
    1) create a gluster brick if none with the expected name exists
    2) create volume on brick
    3) create VM image on disk with specific format
    4) install vm on VM image
    5) boot VM
    6) start fio test on booted VM

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    image_name = params.get("image_name")
    timeout = int(params.get("login_timeout", 360))
    # Workaround for wrong config file order.
    params['image_name_backing_file_snapshot'] = params.get("image_name")
    params['image_format_backing_file_snapshot'] = params.get("image_format")
    params['image_name_snapshot'] = params.get("image_name") + "-snap"

    error_context.context("boot guest over glusterfs", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    vm.wait_for_login(timeout=timeout)
    error_context.context("shutdown VM", logging.info)
    vm.destroy()
    error_context.context("create snapshot of vm disk", logging.info)

    snapshot_params = params.object_params("snapshot")

    base_dir = params.get("images_base_dir", data_dir.get_data_dir())
    image = qemu_storage.QemuImg(snapshot_params, base_dir, image_name)
    image.create(snapshot_params)

    env_process.process(test, snapshot_params, env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
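
The *_snapshot keys set above are folded back into their base names by object_params(); a minimal sketch of the Params semantics this example relies on:

snapshot_params = params.object_params("snapshot")
# The lookups performed afterwards then resolve as:
#   snapshot_params["image_name"]   -> params["image_name_snapshot"]
#   snapshot_params["image_format"] -> params["image_format_snapshot"]
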
Example #6
def run(test, params, env):
    """Convert from/to luks image."""
    vm = img_utils.boot_vm_with_images(test, params, env)
    session = vm.wait_for_login()
    guest_temp_file = params["guest_temp_file"]
    md5sum_bin = params.get("md5sum_bin", "md5sum")
    sync_bin = params.get("sync_bin", "sync")

    logging.debug("Create temporary file on guest: %s", guest_temp_file)
    img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin)

    logging.debug("Get md5 value of the temporary file")
    md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session)
    session.close()
    vm.destroy()

    root_dir = data_dir.get_data_dir()
    convert_source = params["convert_source"]
    convert_target = params["convert_target"]
    source_params = params.object_params(convert_source)
    source = qemu_storage.QemuImg(source_params, root_dir, convert_source)
    cache_mode = params.get("cache_mode")
    logging.debug("Convert from %s to %s", convert_source, convert_target)
    fail_on((process.CmdError,))(source.convert)(
        source_params, root_dir, cache_mode=cache_mode)

    logging.debug("Compare images: %s and %s", convert_source, convert_target)
    img_utils.qemu_img_compare(params, convert_source, convert_target)

    vm = img_utils.boot_vm_with_images(test, params, env, (convert_target,))
    session = vm.wait_for_login()
    logging.debug("Verify md5 value of the temporary file")
    img_utils.check_md5sum(guest_temp_file, md5sum_bin, session,
                           md5_value_to_check=md5_value)
    session.close()
    vm.destroy()
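
fail_on() is a decorator that re-raises the listed exceptions as test failures; the inline call above is equivalent to this two-step form:

convert = fail_on((process.CmdError,))(source.convert)
convert(source_params, root_dir, cache_mode=cache_mode)
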
Example #7
def test_disk_format(test, vm, params):
    """
    Test command disk-format
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    image_dir = params.get("img_dir", data_dir.get_tmp_dir())
    image_name = params.get('image_name')
    image_format = params["image_format"]
    params['image_name'] = 'test'
    support_format = ['raw', 'cow', 'qcow', 'qcow2', 'vdi', 'vmdk', 'vpc']

    for i in support_format:
        params['image_format'] = i
        image = qemu_storage.QemuImg(params, image_dir, '')
        image_path, _ = image.create(params)
        result = gf.disk_format(image_path).stdout.strip()
        os.remove(image_path)

        if result != i:
            gf.close_session()
            test.fail("Format is %s, expected is %s" % (result, i))

    gf.close_session()
    params['image_name'] = image_name
    params["image_format"] = image_format
Example #8
 def start_test(self):
     """
     Main function to run the negative rebase test.
     """
     rebase_chain = self.params.get("rebase_list", "").split(";")
     self.setup_lvm()
     try:
         error_context.context("Create images on lvms", logging.info)
         for image_name in self.params.objects("images"):
             filename = "%s/%s" % (
                 self.data_dir, self.params["image_name_%s" % image_name])
             self.params.update({
                 "image_name_%s" % image_name:
                 filename,
                 "image_size_%s" % image_name:
                 self.params["lv_size"]
             })
             t_params = self.params.object_params(image_name)
             qemu_image = qemu_storage.QemuImg(t_params, self.data_dir,
                                               image_name)
             logging.info("Create image('%s') on %s." %
                          (image_name, qemu_image.storage_type))
             qemu_image.create(t_params)
         error_context.context("Rebase snapshot to backingfile",
                               logging.info)
         for images in rebase_chain:
             output = ""
             cache_mode = self.params.get("cache_mode")
             images = [x.strip() for x in images.split(">")]
             try:
                 image = images[0]
                 base = images[1]
             except IndexError:
                 msg = "Invalid format of'rebase_chain' params \n"
                 msg += "format like: 'image > base;image> base2'"
                 raise error.TestError(msg)
             negtive_test = self.params.get("negtive_test_%s" % image, "no")
             self.params["image_chain"] = " ".join([base, image])
             self.params["base_image_filename"] = image
             t_params = self.params.object_params(image)
             rebase_test = qemu_storage.QemuImg(t_params, self.data_dir,
                                                image)
             try:
                 rebase_test.rebase(t_params, cache_mode)
                 if negtive_test == "yes":
                     msg = "Fail to trigger negative image('%s') rebase" % image
                     raise error.TestFail(msg)
             except process.CmdError as err:
                 output = err.result.stderr
                 logging.info("Rebase image('%s') failed: %s." %
                              (image, output))
                 if negtive_test == "no":
                     msg = "Fail to rebase image('%s'): %s" % (image,
                                                               output)
                     raise error.TestFail(msg)
                 if "(core dumped)" in output:
                     msg = "qemu-img core dumped when change"
                     msg += " image('%s') backing file to %s" % (image,
                                                                 base)
                     raise error.TestFail(msg)
             image_info = rebase_test.info()
             if not image_info:
                 msg = "Fail to get image('%s') info" % image
                 raise error.TestFail(msg)
             backingfile = re.search(r'backing file: +(.*)', image_info,
                                     re.M)
             base_name = rebase_test.base_image_filename
             if not output:
                 if not backingfile:
                     msg = "Expected backing file: %s" % base_name
                     msg += " Actual backing file is null!"
                     raise error.TestFail(msg)
                 elif base_name not in backingfile.group(0):
                     msg = "Expected backing file: %s" % base_name
                     msg += " Actual backing file: %s" % backingfile
                     raise error.TestFail(msg)
     finally:
         try:
             self.lvmdevice.cleanup()
         except Exception:
             logging.error("Failed to remove useless lv, vg and pv")
Example #9
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image with a specific format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set volume xml attribute dictionary, extract all params starting with
    # 'vol_' which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path of libvirt's snapshot metadata for this VM.
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create a dst pool for the attach volume image
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; logical pools can, but the volume format is not
                # supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name,
                                       source=img_path,
                                       target="vdf",
                                       extra=extra,
                                       debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." %
                                        (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs
                          and disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for
                        # block type. As a block device is treated as raw
                        # format by default, it is not fit as an external
                        # disk snapshot target. A workaround is to run
                        # qemu-img again on the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this "
                            "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(
                    vm_name, snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, only do the revert
        # for internal snapshots and treat the skipped external cases as
        # passing. Once external revert is supported, move the following
        # code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name,
                                                  snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        r"revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail(
                        "Revert command succeeded, but VM is not "
                        "paused after reverting with the --paused "
                        "option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now.
        # Only do this when snapshot creation succeeded, which is filtered
        # in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still"
                                                 " exists" % snap_xml_path)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name,
                                 pool_type,
                                 pool_target,
                                 emulated_image,
                                 source_name=vol_name)
            except error.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
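
The create-then-close dance around tmp_file in the middle of this test only reserves a unique path on the host, which is then recreated inside the guest; a condensed sketch:

import tempfile

tmp_file = tempfile.NamedTemporaryFile(prefix="snapshot_test_", dir="/tmp")
tmp_file_path = tmp_file.name   # unique name, e.g. /tmp/snapshot_test_ab12cd
tmp_file.close()                # the host file is deleted; only the name is reused
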
Example #10
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported by sufficiently new libvirt versions.)
    These are test cases for it:
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Create a new vm for test, undefine it at last
    new_vm_name = "%s_discardtest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        test.error("Define new vm failed.")
    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
    except Exception as detail:
        test.error("Create new vm failed:%s" % detail)

    disk_type = params.get("disk_type", "file")
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            if not discard_device.count("/DEV/EXAMPLE"):
                create_iscsi = False
            else:
                create_iscsi = True
                discard_device = create_iscsi_device(test)
            device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(new_vm_name, xmlfile,
                            flagstr="--persistent", ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            channelfile = prepare_channel_xml(new_vm_name)
            virsh.attach_device(new_vm_name, channelfile,
                                flagstr="--persistent", ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(new_vm_name))

        # Verify attached device in vm
        if new_vm.is_dead():
            new_vm.start()
        new_vm.wait_for_login()
        af_disks = get_vm_disks(new_vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            test.fail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupy space on the new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        bf_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(new_vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(test, fstrim_type, new_vm, status_error)
        af_fstrim_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                test.fail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                test.fail("Automatic 'fstrims' didn't work.")
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            process.run("pvremove -f %s" % discard_device, ignore_status=True,
                        shell=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
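
sig_delta() is defined elsewhere in this test module; a minimal sketch of the semantics the checks above assume (the 1% threshold is hypothetical):

def sig_delta(before, after):
    # Hypothetical reimplementation: True when the two capacity readings
    # differ by more than 1%, i.e. fstrim actually released space.
    before, after = float(before), float(after)
    return abs(before - after) / max(before, 1.0) > 0.01
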
Example #11
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1). Init variables for test.
    (2). Create an image to attach to the VM.
    (3). Attach disk.
    (4). Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {
        'model': sec_model,
        'label': img_label,
        'relabel': sec_relabel
    }
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create a dst pool for the attach volume image
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; logical pools can, but the volume format is not
                # supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # re-set host_sestatus, as the nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{
                "attrs": {
                    source_type: img_path
                },
                "seclabels": source_seclabel
            })
        else:
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {
                    source_type: img_path
                }})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check
                # that cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1,
                                                   0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
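
The magic constant tested above is the CAP_SYS_RAWIO bit: capability number 17 in the kernel's per-process capability masks:

CAP_SYS_RAWIO = 17
assert 0x0000000000020000 == 1 << CAP_SYS_RAWIO
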
Example #12
 def get_image_param_by_tag(root_dir, tag):
     parms = params.object_params(tag)
     image = qemu_storage.QemuImg(parms, root_dir, tag)
     name = image.image_filename
     return parms, name, image
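
A usage sketch for the helper above (the tag is illustrative; params comes from the enclosing test):

parms, filename, image = get_image_param_by_tag(data_dir.get_data_dir(),
                                                "image1")
logging.info("image tag 'image1' resolves to %s", filename)
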
Example #13
def run(test, params, env):
    """
    ENOSPC test

    1) Create a virtual disk on lvm
    2) Boot up guest with two disks
    3) Continually write data to second disk
    4) Check images and extend second disk when no space
    5) Continue paused guest
    6) Repeat step 3~5 several times

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    error.context("Create a virtual disk on lvm")
    enospc_config = EnospcConfig(test, params)
    enospc_config.setup()

    error.context("Boot up guest with two disks")
    vm = env.get_vm(params["main_vm"])
    vm.create()
    login_timeout = int(params.get("login_timeout", 360))
    session_serial = vm.wait_for_serial_login(timeout=login_timeout)

    vgtest_name = params["vgtest_name"]
    lvtest_name = params["lvtest_name"]
    logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)

    drive_format = params["drive_format"]
    output = session_serial.cmd_output("dir /dev")
    devname = "/dev/" + re.findall("([shv]db)\s", output)[0]
    cmd = params["background_cmd"]
    cmd %= devname

    error.context("Continually write data to second disk")
    logging.info("Sending background cmd '%s'", cmd)
    session_serial.sendline(cmd)

    iterations = int(params.get("repeat_time", 40))
    i = 0
    pause_n = 0
    while i < iterations:
        if vm.monitor.verify_status("paused"):
            pause_n += 1
            error.context("Checking all images in use by %s" % vm.name,
                          logging.info)
            for image_name in vm.params.objects("images"):
                image_params = vm.params.object_params(image_name)
                try:
                    image = qemu_storage.QemuImg(image_params,
                                                 data_dir.get_data_dir(), image_name)
                    image.check_image(image_params, data_dir.get_data_dir())
                except (virt_vm.VMError, error.TestWarn) as e:
                    logging.error(e)
            error.context("Guest paused, extending Logical Volume size",
                          logging.info)
            try:
                utils.run("lvextend -L +200M %s" % logical_volume)
            except error.CmdError as e:
                logging.debug(e.result_obj.stdout)
            error.context("Continue paused guest", logging.info)
            vm.resume()
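
background_cmd comes from the test configuration; an illustrative value (the %s placeholder receives the guest device path discovered above):

params["background_cmd"] = "nohup dd if=/dev/zero of=%s bs=1M oflag=direct &"
# After cmd %= devname this becomes, e.g.:
#   nohup dd if=/dev/zero of=/dev/sdb bs=1M oflag=direct &
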
Example #14
def test_scrub_device(test, vm, params):
    """
    Test command scrub-device
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)

    # create another disk with a small size
    image_dir = params.get("img_dir", data_dir.get_tmp_dir())
    image_name = params.get("image_name")
    image_format = params["image_format"]
    image_size = params["image_size"]
    params['image_name'] = "scrub_test"
    params['image_size'] = "1G"
    image = qemu_storage.QemuImg(params, image_dir, '')
    image_path, _ = image.create(params)

    gf.add_drive_opts(image_path, readonly=readonly)
    gf.run()

    pv_name = "/dev/sdb"
    part_name = "/dev/sdb1"

    gf.part_disk(pv_name, 'msdos')
    gf.mkfs('ext4', part_name)

    device_info = gf.file(pv_name).stdout.strip()
    logging.debug(device_info)
    part_info = gf.file(part_name).stdout.strip()
    logging.debug(part_info)

    if len(device_info) < 20 and len(part_info) < 20:
        gf.close_session()
        test.fail("Info is not correct")

    # scrub partition, device info should be kept
    gf.scrub_device(part_name)

    device_info_new = gf.file(pv_name).stdout.strip()
    logging.debug(device_info_new)
    if device_info_new != device_info:
        gf.close_session()
        test.fail("Device info should be kept")

    part_info_new = gf.file(part_name).stdout.strip()
    logging.debug(part_info_new)
    if part_info_new != "data":
        gf.close_session()
        test.fail("Scrub partition failed")

    # scrub the whole device
    gf.scrub_device(pv_name)

    device_info_new = gf.file(pv_name).stdout.strip()
    logging.debug(device_info_new)
    if device_info_new != 'data':
        gf.close_session()
        test.fail("Scrub device failed")

    gf.close_session()
    params['image_name'] = image_name
    params['image_size'] = image_size
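
A note on the partition-scrub checks above: scrubbing /dev/sdb1 overwrites only the partition's contents, so file(1) still recognizes the partition table on /dev/sdb while the scrubbed partition reads back as plain 'data'. The expected state right after gf.scrub_device(part_name), condensed:

assert gf.file(pv_name).stdout.strip() == device_info   # partition table intact
assert gf.file(part_name).stdout.strip() == "data"      # filesystem wiped
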
Example #15
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1). Init variables for test.
    (2). Create disk xml with per-image DAC.
    (3). Start VM.
    (4). Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        raise error.TestNAError("per-image DAC only supported on version 1.2.7"
                                " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create an image.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create an image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file to qemu:qemu ownership and 660 mode
        os.chown(img_path, 107, 107)  # uid/gid 107 is typically qemu:qemu
        os.chmod(img_path, 432)       # 432 == 0o660

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name,
                                     filearg=disk_xml,
                                     flagstr=option,
                                     debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    raise error.TestFail("The image dac label %s is not "
                                         "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name,
                                    target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
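
For reference, the per-image DAC label that utlv.create_disk_xml injects
corresponds to a <seclabel> element nested inside the disk's <source>; a
rough sketch of the resulting XML (path and label values are placeholders,
not taken from the test):

# Shape of the disk XML attached above (values are placeholders):
disk_xml_sketch = """
<disk type='file' device='disk'>
  <source file='/var/lib/avocado/data/tmp/vol.qcow2'>
    <seclabel model='dac' relabel='yes'>
      <label>107:107</label>
    </seclabel>
  </source>
  <target dev='vdb' bus='virtio'/>
</disk>
"""
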
Beispiel #16
0
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name,
                               source=img_path,
                               target="vdf",
                               extra="--persistent --subdriver %s" %
                               image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = [
                "<domainsnapshot>\n",
                "<name>%s</name>\n" % snapshot_name,
                "<description>Snapshot Test</description>\n"
            ]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(
                        tmp_dir, "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error: %s." %
                        snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error: %s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search("\d+",
                                      snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = [
                    "<domainsnapshot>\n",
                    "<description>Snapshot Test</description>\n",
                    "<state>running</state>\n",
                    "<creationTime>%s</creationTime>" % snapshot_name,
                    "</domainsnapshot>"
                ]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name, options)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot "
                                         "--current. Error: %s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail(
                "Snapshot creation succeeded in a negative case.\n"
                "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail("Revert snapshot failed. %s" %
                                 revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail(
                    "Revert command succeeded, but VM is not "
                    "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
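
The hand-written domainsnapshot XML above can also be produced by virsh
itself; a minimal sketch using snapshot-create-as (VM name, snapshot name,
target device and file paths are assumptions):

import subprocess

def create_external_snapshot(vm_name, snap_name, mem_file, disk_file):
    """Create an external memory+disk snapshot without writing XML by hand."""
    subprocess.check_call([
        "virsh", "snapshot-create-as", vm_name, snap_name,
        "--memspec", "file=%s,snapshot=external" % mem_file,
        "--diskspec", "vda,snapshot=external,file=%s" % disk_file,
    ])
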
Beispiel #17
0
def run(test, params, env):
    """
    Convert remote image.

    1) Start VM and create a tmp file, record its md5sum, shutdown VM
    2) Convert the image
    3) Boot VM from the converted image and check the md5sum
    """
    def _check_file(boot_image, md5_value):
        logging.debug("Check md5sum.")
        vm = img_utils.boot_vm_with_images(test, params, env, (boot_image, ))
        session = vm.wait_for_login()
        guest_temp_file = params["guest_temp_file"]
        md5sum_bin = params.get("md5sum_bin", "md5sum")
        img_utils.check_md5sum(guest_temp_file,
                               md5sum_bin,
                               session,
                               md5_value_to_check=md5_value)
        session.close()
        vm.destroy()

    vm = img_utils.boot_vm_with_images(test, params, env)
    session = vm.wait_for_login()
    guest_temp_file = params["guest_temp_file"]
    md5sum_bin = params.get("md5sum_bin", "md5sum")
    sync_bin = params.get("sync_bin", "sync")

    logging.info("Create temporary file on guest: %s", guest_temp_file)
    img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin)

    md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session)
    logging.info("Get md5 value of the temporary file: %s", md5_value)

    session.close()
    vm.destroy()

    root_dir = data_dir.get_data_dir()

    # Make a list of all source and target image pairs
    img_pairs = [(params["convert_source"], params["convert_target"])]
    if params.get("convert_target_remote"):
        # local -> remote
        img_pairs.append(
            (params["convert_target"], params["convert_target_remote"]))

    # Convert images
    for source, target in img_pairs:
        params["convert_source"] = source
        params["convert_target"] = target

        source_params = params.object_params(source)
        target_params = params.object_params(target)

        source_image = qemu_storage.QemuImg(source_params, root_dir, source)
        target_image = qemu_storage.QemuImg(target_params, root_dir, target)

        # remove the target
        target_filename = storage.get_image_filename(target_params, root_dir)
        storage.file_remove(target_params, target_filename)

        # Convert source to target
        cache_mode = params.get("cache_mode")
        source_cache_mode = params.get("source_cache_mode")
        logging.info("Convert %s to %s", source, target)
        fail_on((process.CmdError, ))(source_image.convert)(
            params,
            root_dir,
            cache_mode=cache_mode,
            source_cache_mode=source_cache_mode)

        _check_file(target, md5_value)

    # Remove images converted
    for _, target in img_pairs:
        target_params = params.object_params(target)
        target_image = qemu_storage.QemuImg(target_params, root_dir, target)
        target_image.remove()
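
Under the hood, QemuImg.convert builds a qemu-img convert command line; a
rough standalone equivalent of the conversion step (paths, formats and
cache modes are assumptions):

import subprocess

def convert_image(src, dst, src_fmt="qcow2", dst_fmt="qcow2",
                  cache="none", src_cache="none"):
    """Convert src to dst, mirroring the cache_mode/source_cache_mode knobs."""
    subprocess.check_call([
        "qemu-img", "convert",
        "-f", src_fmt, "-O", dst_fmt,
        "-t", cache,        # destination cache mode
        "-T", src_cache,    # source cache mode
        src, dst,
    ])
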
Beispiel #18
0
 def get_image_by_tag(self, name):
     image_dir = data_dir.get_data_dir()
     image_params = self.params.object_params(name)
     return qemu_storage.QemuImg(image_params, image_dir, name)
Beispiel #19
0
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create an image to attach to the VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about the image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create an image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()

    # Do the attach action.
    vm.attach_disk(source=img_path, target="vdf", extra="--persistent")

    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        vm.detach_disk(target="vdf", extra="--persistent")
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
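
utils_selinux.set_context_of_file is essentially a chcon call; a minimal
sketch (the context string is an example value, not taken from the test):

import subprocess

def set_selinux_context(path, context="system_u:object_r:svirt_image_t:s0"):
    """Label a file so a confined qemu process may (or may not) open it."""
    subprocess.check_call(["chcon", context, path])
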
Beispiel #20
0
def run_libvirt_scsi(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is none.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create an image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.target = {'dev': "vde", 'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf", 'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {
                'file': cdrom_path
            }})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test"
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg", 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {
                'dev': partition
            }})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up.
        backup_xml.sync()
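
The Controller object built above serializes to a one-line domain XML
fragment; for reference:

# Domain XML fragment corresponding to the scsi controller added above:
scsi_controller_xml = "<controller type='scsi' index='0' model='virtio-scsi'/>"
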
Beispiel #21
0
def run(test, params, env):
    """
    Luks key management by qemu-img amend command
    1. Create a luks image
    2. (1) Add a new password to a free keyslot (with_specified_index 1)
       e.g. qemu-img amend --object secret,id=sec0,data=redhat
            --object secret,id=sec1,data=amend
            -o keyslot=1,state=active,new-secret=sec1
            'json:{"file": {"driver": "file", "filename": "stg.luks"},
            "driver": "luks", "key-secret": "sec0"}'
       (2) check slots -> active is True
    3. (1) Add a new password to a free keyslot (not setting keyslot index)
       e.g. qemu-img amend --object secret,id=sec0,data=redhat
            --object secret,id=sec1,data=amend
            -o state=active,new-secret=sec1
            'json:{"file": {"driver": "file", "filename": "stg.luks"},
            "driver": "luks", "key-secret": "sec0"}'
       (2) check slots -> active is True
    4. Negative test, overwrite active keyslot 0
    5. Negative test, add a new password to invalid keyslot 8
       It must be between 0 and 7
    6. (1) Erase password from keyslot by giving a keyslot index
       in this case, adding a password to keyslot 7 and then erase it
       e.g. qemu-img amend --object secret,id=sec0,data=redhat
            --object secret,id=sec1,data=amend
            -o state=inactive,keyslot=7
            'json:{"file": {"driver": "file", "filename": "base.luks"},
            "driver": "luks", "key-secret": "sec0"}'
       (2) check slots -> active is False
    7. (1) Erase password from keyslot by giving the password
       in this case, adding a password to keyslot 7 and then erase it
       e.g. qemu-img amend --object secret,id=sec0,data=redhat
            --object secret,id=sec1,data=amend
            -o state=inactive,old-secret=sec1
            'json:{"file": {"driver": "file", "filename": "base.luks"},
            "driver": "luks", "key-secret": "sec0"}'
       (2) check slots -> active is False
    8. Negative test, erase the only active keyslot 0

    :param test: VT test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    err_info = params.get("err_info")
    root_dir = data_dir.get_data_dir()
    stg = params["images"]
    stg_params = params.object_params(stg)
    stg_img = qemu_storage.QemuImg(stg_params, root_dir, stg)
    stg_img.create(stg_params)

    erase_password = params.get("erase_password")
    if erase_password:
        # add a new password to keyslot and then erase it
        stg_img.amend(stg_params, ignore_status=True)

        # for luks-inside-qcow2, options are prefixed with "encrypt."
        # e.g. amend_encrypt.state = inactive
        # plain luks uses amend_state = inactive
        encrypt = "encrypt." if stg_img.image_format == "qcow2" else ""
        stg_params.pop("amend_%snew-secret" % encrypt)
        stg_params["amend_%sstate" % encrypt] = "inactive"
        if erase_password == "password":
            stg_params.pop("amend_%skeyslot" % encrypt)
            stg_params["amend_%sold-secret" %
                       encrypt] = stg_params["amend_secret_id"]

    cmd_result = stg_img.amend(stg_params, ignore_status=True)
    if err_info:
        if not re.search(err_info, cmd_result.stderr.decode(), re.I):
            test.fail("Failed to get error information. The actual error "
                      "information is %s." % cmd_result.stderr.decode())
    elif cmd_result.exit_status != 0:
        test.fail("Failed to amend image %s. The error information is "
                  "%s." % (stg_img.image_filename, cmd_result.stderr.decode()))
    else:
        info = json.loads(stg_img.info(output="json"))
        if stg_img.image_format == "qcow2":
            key_state = stg_params["amend_encrypt.state"]
            key_slot = params.get_numeric("amend_encrypt.keyslot", 1)
            state = info["format-specific"]["data"]["encrypt"]["slots"][
                key_slot]["active"]
        else:
            key_state = stg_params["amend_state"]
            key_slot = params.get_numeric("amend_keyslot", 1)
            state = info["format-specific"]["data"]["slots"][key_slot][
                "active"]
        key_state = (key_state == "active")
        if key_state != state:
            test.fail("The key state is %s, it should be %s." %
                      (state, key_state))
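
The amend invocations in the docstring map directly onto command lines like
the following sketch (the secret data and filename are the documented
example values):

import subprocess

def add_luks_key(image="stg.luks", old_data="redhat", new_data="amend",
                 keyslot=1):
    """Activate a new keyslot on a LUKS image, as in step 2 above."""
    img_spec = ('json:{"file": {"driver": "file", "filename": "%s"}, '
                '"driver": "luks", "key-secret": "sec0"}' % image)
    subprocess.check_call([
        "qemu-img", "amend",
        "--object", "secret,id=sec0,data=%s" % old_data,
        "--object", "secret,id=sec1,data=%s" % new_data,
        "-o", "keyslot=%d,state=active,new-secret=sec1" % keyslot,
        img_spec,
    ])
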
Beispiel #22
0
def run_boot_savevm(test, params, env):
    """
    libvirt boot savevm test:

    1) Start guest booting
    2) Record the original snapshot list of the floppy (optional).
    3) Periodically savevm/loadvm while guest is booting
    4) Stop test when able to login, or fail after timeout seconds.
    5) Check the floppy's snapshot list and compare it with the original
       one (optional).

    @param test: test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    if params.get("with_floppy") == "yes":
        floppy_name = params.get("floppies", "fl")
        floppy_params = {
            "image_format": params.get("floppy_format", "qcow2"),
            "image_size": params.get("floppy_size", "1.4M"),
            "image_name": params.get("%s_name" % floppy_name, "images/test"),
            "vm_type": params.get("vm_type"),
            "qemu_img_binary": utils_misc.get_qemu_img_binary(params)
        }
        floppy = qemu_storage.QemuImg(floppy_params, data_dir.get_data_dir(),
                                      floppy_name)
        floppy.create(floppy_params)
        floppy_orig_info = floppy.snapshot_list()
        vm.create(params=params)

    vm.verify_alive()  # This shouldn't require logging in to guest
    savevm_delay = float(params["savevm_delay"])
    savevm_login_delay = float(params["savevm_login_delay"])
    savevm_login_timeout = float(params["savevm_timeout"])
    savevm_statedir = params.get("savevm_statedir", tempfile.gettempdir())
    fd, savevm_statefile = tempfile.mkstemp(suffix='.img',
                                            prefix=vm.name + '-',
                                            dir=savevm_statedir)
    os.close(fd)  # save_to_file doesn't need the file open
    start_time = time.time()
    cycles = 0

    successful_login = False
    while (time.time() - start_time) < savevm_login_timeout:
        logging.info("Save/Restore cycle %d", cycles + 1)
        time.sleep(savevm_delay)
        vm.pause()
        if params['save_method'] == 'save_to_file':
            vm.save_to_file(savevm_statefile)  # Re-use same filename
            vm.restore_from_file(savevm_statefile)
        else:
            vm.savevm("1")
            vm.loadvm("1")
        vm.resume()  # doesn't matter if already running or not
        vm.verify_kernel_crash()  # just in case
        try:
            vm.wait_for_login(timeout=savevm_login_delay)
            successful_login = True  # not set if timeout expires
            os.unlink(savevm_statefile)  # don't let these clutter disk
            break
        except:
            pass  # loop until successful login or time runs out
        cycles += 1

    time_elapsed = int(time.time() - start_time)
    info = "after %s s, %d load/save cycles" % (time_elapsed, cycles + 1)
    if not successful_login:
        raise error.TestFail("Can't log on '%s' %s" % (vm.name, info))
    else:
        logging.info("Test ended %s", info)

    if params.get("with_floppy") == "yes":
        vm.destroy()
        floppy_info = floppy.snapshot_list()
        if floppy_info == floppy_orig_info:
            raise error.TestFail("savevm didn't create snapshot in floppy."
                                 "    original snapshot list is: %s"
                                 "    now snapshot list is: %s" %
                                 (floppy_orig_info, floppy_info))
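
floppy.snapshot_list() boils down to listing the image's internal snapshot
table; a minimal equivalent with qemu-img (the path is an assumption):

import subprocess

def snapshot_list(image_path):
    """Return the internal snapshot table of an image."""
    return subprocess.check_output(
        ["qemu-img", "snapshot", "-l", image_path]).decode()
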
Beispiel #23
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create an image to attach to the VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about the image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes, but the volume
                # format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                extra = "--driver qemu --type lun --rawio --persistent"
            else:
                extra = "--persistent --subdriver qcow2"

            # restore host_sestatus, since creating an nfs pool resets it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
            if result.exit_status:
                raise error.TestNAError("Failed to set virt_use_nfs value")
        else:
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            utils_selinux.set_context_of_file(filename=img_path, context=img_label)
            extra = "--persistent"

        # Do the attach action.
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(val_list[1].strip(), 16)

                # bitwise AND with the rawio capability value to check
                # that cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000  # CAP_SYS_RAWIO, bit 17
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lacks the cap_sys_rawio capability"
                        raise error.TestFail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " has the cap_sys_rawio capability"
                        logging.debug(inf_msg)

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
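
The CAP_SYS_RAWIO check above can be distilled into a small helper; a
sketch reading the same /proc fields:

def has_cap_sys_rawio(pid):
    """True if CapPrm, CapEff and CapBnd all include CAP_SYS_RAWIO (bit 17)."""
    cap_sys_rawio = 1 << 17  # == 0x20000, as used above
    caps = {}
    with open("/proc/%s/status" % pid) as status:
        for line in status:
            key, _, value = line.partition(":")
            if key in ("CapPrm", "CapEff", "CapBnd"):
                caps[key] = int(value.strip(), 16)
    return all(value & cap_sys_rawio for value in caps.values())
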
Beispiel #24
0
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    case = params.get('case', '')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    libvirt_version.is_libvirt_feature_supported(params)

    file_to_del = []
    tmp_dir = data_dir.get_data_dir()

    try:
        if case:
            if case == 'reuse_external':
                # Create a transient vm for test
                vm.undefine()
                virsh.create(vmxml.xml)

                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('No disk file found in vm.')
                image_file = all_disks[0].find('source').get('file')
                disk_dev = all_disks[0].find('target').get('dev')
                logging.debug('Image file of vm: %s', image_file)

                # Get image info
                image_info = utils_misc.get_image_info(image_file)
                logging.info('Image info: %s', image_info)

                # Get virtual size of the image file, in GiB
                vsize = image_info['vsize'] / 1073741824.0
                logging.info('Virtual size of image file: %f', vsize)

                new_image_size = vsize
                image_dir = '/'.join(image_file.split('/')[:-1])
                new_image_path = os.path.join(
                    image_dir,
                    'new_image_' + utils_misc.generate_random_string(3))
                file_to_del.append(new_image_path)

                # Create new image file
                cmd_image_create = 'qemu-img create -f qcow2 %s %fG' % (
                    new_image_path, new_image_size)
                process.run(cmd_image_create, shell=True, verbose=True)

                # Do blockcopy with --reuse-external option
                virsh.blockcopy(vm_name,
                                disk_dev,
                                new_image_path,
                                options='--verbose --wait --reuse-external',
                                debug=True,
                                ignore_status=False)
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--pivot',
                               debug=True,
                               ignore_status=False)
                logging.debug('Current vm xml: %s', vmxml)

                # Current disk source file should be new image
                cur_disks = vmxml.get_disk_source(vm_name)
                cur_sfile = cur_disks[0].find('source').get('file')
                logging.debug('Now disk source file is: %s', cur_sfile)
                if cur_sfile.strip() != new_image_path:
                    test.fail('Disk source file is not updated.')
            if case == 'custom_cluster_size':

                def update_vm_with_cluster_disk():
                    """
                    Update vm's first disk with a image which has customized
                    cluster size

                    :return: The source image params
                    """
                    source_img_params = params.copy()
                    source_img_params['image_name'] = params.get(
                        'source_image_name', 'source_image')
                    source_img = qemu_storage.QemuImg(source_img_params,
                                                      tmp_dir, '')
                    source_img_path, _ = source_img.create(source_img_params)
                    file_to_del.append(source_img_path)
                    source_img_params['disk_source_name'] = source_img_path
                    libvirt.set_vm_disk(vm, source_img_params)
                    return source_img_params

                source_img_params = update_vm_with_cluster_disk()
                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('No disk file found in vm.')
                disk_dev = all_disks[0].find('target').get('dev')

                # Blockcopy the source image to the target image path
                target_img_params = source_img_params.copy()
                target_img_name = params.get('target_image_name',
                                             'target_image')
                target_img_params['image_name'] = target_img_name
                target_img_path = os.path.join(tmp_dir,
                                               target_img_name + '.qcow2')
                file_to_del.append(target_img_path)
                virsh.blockcopy(vm_name,
                                disk_dev,
                                target_img_path,
                                options='--verbose --wait --transient-job',
                                debug=True,
                                ignore_status=False)
                target_img = qemu_storage.QemuImg(target_img_params, tmp_dir,
                                                  '')
                target_img_info = json.loads(
                    target_img.info(force_share=True, output='json'))

                # Compare the source and target images' cluster size
                source_img_cluster = str(
                    source_img_params.get('image_cluster_size'))
                target_img_cluster = str(target_img_info['cluster-size'])
                if source_img_cluster != target_img_cluster:
                    test.fail("Images have different cluster size:\n"
                              "Source image cluster size: %s\n"
                              "Target image cluster size: %s" %
                              (source_img_cluster, target_img_cluster))

                # Abort the blockcopy job
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--abort',
                               debug=True,
                               ignore_status=False)

    finally:
        if case == 'reuse_external':
            # Recover vm and remove the transient vm
            virsh.destroy(vm_name, debug=True)
            virsh.define(bkxml.xml, debug=True)
        bkxml.sync()

        # Remove files to be deleted
        if file_to_del:
            for item in file_to_del:
                if os.path.exists(item):
                    os.remove(item)
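
For the custom_cluster_size case, the source image's cluster size is fixed
at creation time (image_cluster_size in the params); the qemu-img
equivalent (path, size and cluster value are assumptions):

import subprocess

def create_qcow2_with_cluster(path, size="1G", cluster_size="512k"):
    """Create a qcow2 whose cluster size blockcopy is expected to preserve."""
    subprocess.check_call([
        "qemu-img", "create", "-f", "qcow2",
        "-o", "cluster_size=%s" % cluster_size,
        path, size,
    ])
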
Beispiel #25
0
def test_md_test(test, vm, params):
    """
    Test command scrub-device
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)

    image_dir = params.get("img_dir", data_dir.get_tmp_dir())
    image_name = params.get("image_name")
    image_format = params["image_format"]
    image_size = params["image_size"]

    params['image_size'] = "1G"
    for name in ['md1', 'md2', 'md3']:
        params['image_name'] = name
        image = qemu_storage.QemuImg(params, image_dir, '')
        image_path, _ = image.create(params)
        gf.add_drive_opts(image_path, readonly=readonly)

    gf.run()

    md = gf.list_md_devices().stdout.strip()
    if md:
        # stop any existing md device
        gf.md_stop(md)

    device = "'/dev/sda /dev/sdb /dev/sdc'"
    gf.md_create("md11",
                 device,
                 missingbitmap="0x4",
                 chunk=8192,
                 level="raid6")
    md = gf.list_md_devices().stdout.strip()
    logging.debug(md)
    if not md:
        gf.close_session()
        test.fail("Can not find the md device")

    detail = gf.md_detail(md).stdout.strip()
    logging.debug(detail)
    if "level: raid6" not in detail or "devname: md11" not in detail:
        gf.close_session()
        test.fail("MD detail info is not correct")

    gf.md_stop(md)

    gf.md_create("md21",
                 device,
                 nrdevices=2,
                 spare=1,
                 chunk=8192,
                 level="raid4")
    md = gf.list_md_devices().stdout.strip()
    logging.debug(md)
    if not md:
        gf.close_session()
        test.fail("Can not find the md device")
    stat = gf.md_stat(md).stdout.strip()
    logging.debug(stat)
    for i in re.findall("\w+", device):
        if i not in stat:
            gf.close_session()
            test.fail("MD stat is not correct")
    if "mdstat_flags: S" not in stat:
        gf.close_session()
        test.fail("There should be a S flag for spare disk")

    gf.close_session()

    params["image_name"] = image_name
    params["image_size"] = image_size
Beispiel #26
0
def run(test, params, env):
    """
    Check the dump info of snapshot files over nbd.
    1. Create a base image with 4 clusters of 64k.
    2. Create a top snapshot based on the base image.
    3. Write data to the first/second/third cluster of the base image file.
    4. Write data to the second/third cluster of the top image.
    5. Export the snapshot image over NBD.
    6. Check the dump info of the snapshot over NBD.
    :param test: VT test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _qemu_io(img, cmd):
        """Run qemu-io cmd to a given img."""
        try:
            QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120)
        except process.CmdError as err:
            test.fail("qemu-io to '%s' failed: %s." %
                      (img.image_filename, str(err)))

    images = params["image_chain"].split()
    base_img = images[0]
    top_img = images[1]
    root_dir = data_dir.get_data_dir()
    base = QemuImg(params.object_params(base_img), root_dir, base_img)
    top = QemuImg(params.object_params(top_img), root_dir, top_img)

    # write data to the base image
    _qemu_io(base, params["base_io_cmd_01"])
    _qemu_io(base, params["base_io_cmd_02"])
    _qemu_io(base, params["base_io_cmd_03"])

    # write data to the top image
    _qemu_io(top, params["top_io_cmd_01"])
    _qemu_io(top, params["top_io_cmd_02"])

    # export the top image over nbd
    nbd_export = QemuNBDExportImage(params, top_img)
    nbd_export.export_image()

    nbd_image_tag = params['nbd_image_tag']
    nbd_image_params = params.object_params(nbd_image_tag)
    localhost = socket.gethostname()
    nbd_image_params['nbd_server'] = localhost if localhost else 'localhost'
    qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag)
    nbd_image = qemu_img.image_filename
    map_cmd = params["map_cmd"]
    check_msg = params["check_msg"]

    logging.info("Dump the info of '%s'", nbd_image)
    try:
        result = process.run(map_cmd + " " + nbd_image,
                             ignore_status=True,
                             shell=True)
        if result.exit_status != 0:
            test.fail('Failed to execute the map command, error message: %s' %
                      result.stderr.decode())
        elif check_msg not in result.stdout.decode().strip():
            test.fail("Message '%s' mismatched with '%s'" %
                      (check_msg, result.stdout.decode()))
    finally:
        nbd_export.stop_export()
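
The map command in the params amounts to running qemu-img map against the
NBD URI of the export; a sketch (host, port and export name are
assumptions):

import subprocess

def dump_allocation(host="localhost", port=10809, export=""):
    """Show which ranges of the exported snapshot are allocated where."""
    uri = "nbd://%s:%d/%s" % (host, port, export)
    return subprocess.check_output(
        ["qemu-img", "map", "--output=json", uri]).decode()
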
Beispiel #27
0
def run(test, params, env):
    """
    live_snapshot chain test:

    Will test snapshot with the following steps:
    1. Boot up guest with base image
    2. Do pre-snapshot operations (optional)
    3. Do live snapshot
    4. Do post-snapshot operations (optional)
    5. Check the base and snapshot images (optional)

    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_base_image(snapshot_chain, snapshot_file):
        try:
            index = snapshot_chain.index(snapshot_file)
        except ValueError:
            index = -1

        if index > 0:
            base_image = snapshot_chain[index - 1]
        else:
            base_image = None
        return base_image

    def do_operate(params, key_word):
        operate_cmd = params.get(key_word)
        timeout = int(params.get("operate_timeout", "60"))
        for cmd in re.findall("{(.+?)}", operate_cmd):
            if re.match("shell:", cmd):
                cmd = cmd[6:]
                session.cmd(cmd, timeout=timeout)
            elif re.match("shell_no_reply:", cmd):
                cmd = cmd[15:]
                session.sendline(cmd)
                time.sleep(timeout)
            elif re.match("monitor:", cmd):
                cmd = cmd[8:]
                vm.monitor.send_args_cmd(cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    snapshot_chain = params.get("snapshot_chain")
    file_create_cmd = params.get("file_create_cmd")
    file_check_cmd = params.get("file_check_cmd")
    file_dir = params.get("file_dir")
    dir_create_cmd = params.get("dir_create_cmd")
    md5_cmd = params.get("md5_cmd")

    snapshot_chain = re.split("\s+", snapshot_chain)
    session = vm.wait_for_login(timeout=timeout)

    md5_value = {}
    files_in_guest = {}
    for index, image in enumerate(snapshot_chain):
        image_params = params.object_params(image)
        if image_params.get("file_create"):
            session.cmd(dir_create_cmd % file_dir)
        if index > 0:
            snapshot_file = storage.get_image_filename(image_params,
                                                       data_dir.get_data_dir())
            base_image = get_base_image(snapshot_chain, image)
            base_image_params = params.object_params(base_image)
            base_file = storage.get_image_filename(base_image_params,
                                                   data_dir.get_data_dir())
            snapshot_format = image_params.get("image_format")

            error.context("Do pre snapshot operates", logging.info)
            if image_params.get("pre_snapshot_cmd"):
                do_operate(image_params, "pre_snapshot_cmd")

            error.context("Do live snapshot ", logging.info)
            vm.live_snapshot(base_file, snapshot_file, snapshot_format)

            error.context("Do post snapshot operates", logging.info)
            if image_params.get("post_snapshot_cmd"):
                do_operate(image_params, "post_snapshot_cmd")
            md5 = ""
            if image_params.get("file_create"):
                session.cmd(file_create_cmd % image)
                md5 = session.cmd_output(md5_cmd % image)
            md5_value[image] = md5_value[base_image].copy()
            md5_value[image].update({image: md5})
        elif index == 0:
            md5 = ""
            if params.get("file_create"):
                session.cmd(file_create_cmd % image)
                md5 = session.cmd_output(md5_cmd % image)
            md5_value[image] = {image: md5}
        if image_params.get("check_alive_cmd"):
            session.cmd(image_params.get("check_alive_cmd"))
        if image_params.get("file_create"):
            files_check = session.cmd(file_check_cmd % file_dir)
            files_in_guest[image] = files_check
    session.close()

    error.context("Do base files check", logging.info)
    snapshot_chain_backward = snapshot_chain[:]
    snapshot_chain_backward.reverse()
    for index, image in enumerate(snapshot_chain_backward):
        image_params = params.object_params(image)
        if image_params.get("check_base_image"):
            vm.destroy()
            vm.create(params=image_params)
            vm.verify_alive()

            session = vm.wait_for_login(timeout=timeout)
            if image_params.get("file_create"):
                for file in md5_value[image]:
                    md5 = session.cmd_output(md5_cmd % file)
                    if md5 != md5_value[image][file]:
                        error_message = "File %s in image %s changed " %\
                                        (file, image)
                        error_message += "from '%s' to '%s'(md5)" %\
                                         (md5_value[image][file], md5)
                        raise error.TestFail(error_message)
                files_check = session.cmd(file_check_cmd % file_dir)
                if files_check != files_in_guest[image]:
                    error_message = "Files in image %s is not as expect:" %\
                                    image
                    error_message += "Before shut down: %s" %\
                        files_in_guest[image]
                    error_message += "Now: %s" % files_check
                    raise error.TestFail(error_message)
            if image_params.get("image_check"):
                image = qemu_storage.QemuImg(image_params,
                                             data_dir.get_data_dir(), image)
                image.check_image(image_params, data_dir.get_data_dir())
            session.close()

    error.context("Remove snapshot images", logging.info)
    if vm.is_alive():
        vm.destroy()
    if params.get("remove_snapshot_images"):
        for index, image in enumerate(snapshot_chain):
            image_params = params.object_params(image)
            if index != 0:
                image = qemu_storage.QemuImg(image_params,
                                             data_dir.get_data_dir(), image)
                image.remove()
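
get_base_image simply walks one step back in the configured chain; a quick
self-contained illustration (the chain entries are made-up tags):

def get_base_image(snapshot_chain, snapshot_file):
    # same logic as the nested helper in the example above
    try:
        index = snapshot_chain.index(snapshot_file)
    except ValueError:
        index = -1
    return snapshot_chain[index - 1] if index > 0 else None

chain = ["base", "sn1", "sn2"]
assert get_base_image(chain, "sn2") == "sn1"
assert get_base_image(chain, "base") is None     # the base has no parent
assert get_base_image(chain, "missing") is None  # unknown entries too
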
Beispiel #28
0
 def __disk_define_by_params(self, params, image_name):
     images_dir = data_dir.get_data_dir()
     image_params = params.object_params(image_name)
     return qemu_storage.QemuImg(image_params, images_dir, image_name)
Beispiel #29
0
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disks to the configured qemu user to avoid
        # failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
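        # dynamic_ownership=1 lets libvirt chown image files to the configured
        # user/group when the VM starts and restore them afterwards; 0 leaves
        # ownership untouched, which is what the DAC scenarios exercise.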
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create a destination pool holding the volume image to attach
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
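        # pre_pool (assumed semantics) builds the backing storage for the pool
        # type under test; for an nfs pool it exports the server directory with
        # export_options and defines and starts the libvirt pool.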
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create an image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestNAError("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "Error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to create snapshot. Error: %s."
                                         % snapshot_result.stderr.strip())
            else:
                snapshot_name = re.search(
                    r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
    finally:
        # Assumed cleanup: the original snippet is truncated here, so this
        # finally block restores the environment from the backups taken above.
        if vm.is_alive():
            vm.destroy()
        qemu_conf.restore()
        for path, label in backup_labels_of_disks.items():
            uid, gid = label.split(":")
            os.chown(path, int(uid), int(gid))
        for snap_path in disk_snap_path:
            if snap_path and os.path.exists(snap_path):
                os.remove(snap_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        libvirtd.restart()
Example #30
0
def run(test, params, env):
    """
    check if qemu-img rebase could bypass host cache.
    1) create snapshot chain image1 -> sn1 -> sn2
    2) rebase sn2 to image1 and check the open syscall that no flag O_DIRECT
    3) create snapshot chain image1 -> sn1 -> sn2
    4) rebase sn2 to image1 with cache mode 'none' and check flag O_DIRECT
    is on.
    """
    def remove_snapshots():
        """Remove snapshots created."""
        while snapshots:
            snapshot = snapshots.pop()
            snapshot.remove()

    def parse_snapshot_chain(target):
        """Parse snapshot chain."""
        image_chain = params["image_chain"].split()
        for snapshot in image_chain[1:]:
            target.send(snapshot)

    @coroutine
    def create_snapshot(target):
        """Create snapshot."""
        while True:
            snapshot = yield
            logging.debug("create image %s", snapshot)
            snapshot_params = params.object_params(snapshot)
            snapshot = qemu_storage.QemuImg(snapshot_params, root_dir, snapshot)
            fail_on((process.CmdError,))(snapshot.create)(snapshot.params)
            snapshots.append(snapshot)
            target.send(snapshot)

    @coroutine
    def save_file_to_snapshot():
        """Save temporary file to snapshot."""
        sync_bin = params.get("sync_bin", "sync")
        while True:
            snapshot = yield
            logging.debug("boot vm from image %s", snapshot.tag)
            vm = img_utils.boot_vm_with_images(test, params, env,
                                               images=(snapshot.tag,),
                                               vm_name="VM_%s" % snapshot.tag)
            guest_file = params["guest_tmp_filename"] % snapshot.tag
            logging.debug("create tmp file %s in %s", guest_file, snapshot.tag)
            img_utils.save_random_file_to_vm(vm, guest_file, 2048, sync_bin)
            vm.destroy()

    img_utils.find_strace()
    base = params["image_chain"].split()[0]
    params["image_name_%s" % base] = params["image_name"]
    params["image_format_%s" % base] = params["image_format"]
    root_dir = data_dir.get_data_dir()
    base = qemu_storage.QemuImg(params.object_params(base), root_dir, base)
    trace_events = params["trace_event"].split()

    snapshots = []
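    # Wire the coroutine pipeline: parse_snapshot_chain sends each snapshot
    # tag to create_snapshot, which creates the image and passes it on to
    # save_file_to_snapshot, which boots a VM from it and writes a temp file.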
    parse_snapshot_chain(create_snapshot(save_file_to_snapshot()))

    strace_log = os.path.join(test.debugdir, "rebase.log")
    top = snapshots[-1]
    logging.debug("rebase snapshot %s to %s", top.tag, base.tag)
    with img_utils.strace(top, trace_events, strace_log):
        top.base_tag = base.tag
        fail_on((process.CmdError,))(top.rebase)(params)
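
    # With the default cache mode, qemu-img goes through the host page cache,
    # so O_DIRECT must not appear in the trace for any image in the chain.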

    fail_msg = "'O_DIRECT' is presented in %s with file %s"
    for image in [base] + snapshots:
        if img_utils.check_flag(strace_log, image.image_filename, "O_DIRECT"):
            test.fail(fail_msg % (trace_events, image.image_filename))

    remove_snapshots()
    parse_snapshot_chain(create_snapshot(save_file_to_snapshot()))

    strace_log = os.path.join(test.debugdir, "rebase_bypass.log")
    top = snapshots[-1]
    logging.debug("rebase snapshot %s to %s in cache mode 'none'",
                  top.tag, base.tag)
    with img_utils.strace(top, trace_events, strace_log):
        top.base_tag = base.tag
        fail_on((process.CmdError,))(top.rebase)(params,
                                                 cache_mode="none",
                                                 source_cache_mode="none")

    fail_msg = "'O_DIRECT' is missing in %s with file %s"
    for image in [base] + snapshots:
        if not img_utils.check_flag(strace_log,
                                    image.image_filename, "O_DIRECT"):
            test.fail(fail_msg % (trace_events, image.image_filename))

    remove_snapshots()
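
# The two decorated generators above rely on a `coroutine` priming decorator
# that is not part of this snippet. A minimal sketch of such a decorator (an
# assumption, not necessarily the suite's actual helper):
def coroutine(func):
    """Prime a generator-based coroutine so it is ready to accept send()."""
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # advance to the first yield so send() works immediately
        return gen
    return wrapper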