def check_vol(vol_params):
    """
    Check volume information by exercising the virsh vol-* subcommands.

    First runs the operations expected to succeed on this pool type
    (vol-dumpxml/info/key/path/pool/name/resize), then the operations
    expected to be unsupported (vol-clone/create-from/wipe/upload/download),
    which are only required to fail with a message from ``unsupported_err``.

    :param vol_params: dict of volume XML attributes used to build the
                       source volume for the vol-create-from attempt.

    NOTE(review): relies on names from the enclosing scope — pool_name,
    vol_name, disk_src_pool, cloned_vol_name, create_from_cloned_volume,
    unsupported_err, vm, test — presumably a nested helper inside run().
    """
    pv = libvirt_storage.PoolVolume(pool_name)
    # Supported operation
    # The volume must already exist in the pool before any checks make sense.
    if vol_name not in pv.list_volumes():
        test.fail("Volume %s doesn't exist" % vol_name)
    ret = virsh.vol_dumpxml(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    # vol-info
    if not pv.volume_info(vol_name):
        test.fail("Can't see volume info")
    # vol-key: the key is expected to embed "<disk_src_pool>/<vol_name>"
    ret = virsh.vol_key(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
        test.fail("Volume key isn't correct")
    # vol-path: same "<pool>/<name>" form is expected in the path
    ret = virsh.vol_path(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
        test.fail("Volume path isn't correct")
    # vol-pool: reverse lookup from path to owning pool
    ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
    libvirt.check_exit_status(ret)
    if pool_name not in ret.stdout.strip():
        test.fail("Volume pool isn't correct")
    # vol-name: reverse lookup from path to volume name
    ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
    libvirt.check_exit_status(ret)
    if vol_name not in ret.stdout.strip():
        test.fail("Volume name isn't correct")
    # vol-resize
    ret = virsh.vol_resize(vol_name, "2G", pool_name)
    libvirt.check_exit_status(ret)
    # Not supported operation
    # Each of the following is allowed to fail as long as the error
    # matches one of the patterns in unsupported_err (check_result skips
    # the test on a match instead of failing it).
    # vol-clone
    ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-create-from
    volxml = vol_xml.VolXML()
    vol_params.update({"name": "%s" % create_from_cloned_volume})
    v_xml = volxml.new_vol(**vol_params)
    v_xml.xmltreefile.write()
    ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-wipe
    ret = virsh.vol_wipe(vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-upload: uploads the guest's first disk image into the volume
    ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                           "--pool %s" % pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-download
    ret = virsh.vol_download(vol_name, cloned_vol_name,
                             "--pool %s" % pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
def check_vol(vol_params):
    """
    Check volume information by exercising the virsh vol-* subcommands.

    First runs the operations expected to succeed on this pool type
    (vol-dumpxml/info/key/path/pool/name/resize), then the operations
    expected to be unsupported (vol-clone/create-from/wipe/upload/download),
    which are only required to fail with a message from ``unsupported_err``.

    :param vol_params: dict of volume XML attributes used to build the
                       source volume for the vol-create-from attempt.

    NOTE(review): relies on names from the enclosing scope — pool_name,
    vol_name, disk_src_pool, cloned_vol_name, create_from_cloned_volume,
    unsupported_err, vm, test — presumably a nested helper inside run().
    """
    pv = libvirt_storage.PoolVolume(pool_name)
    # Supported operation
    # The volume must already exist in the pool before any checks make sense.
    if vol_name not in pv.list_volumes():
        test.fail("Volume %s doesn't exist" % vol_name)
    ret = virsh.vol_dumpxml(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    # vol-info
    if not pv.volume_info(vol_name):
        test.fail("Can't see volume info")
    # vol-key: the key is expected to embed "<disk_src_pool>/<vol_name>".
    # Use .strip() on stdout for consistency with the companion
    # implementation of this helper (trailing newline is irrelevant to the
    # substring checks but keeps both versions identical in behavior).
    ret = virsh.vol_key(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
        test.fail("Volume key isn't correct")
    # vol-path
    ret = virsh.vol_path(vol_name, pool_name)
    libvirt.check_exit_status(ret)
    if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
        test.fail("Volume path isn't correct")
    # vol-pool: reverse lookup from path to owning pool
    ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
    libvirt.check_exit_status(ret)
    if pool_name not in ret.stdout.strip():
        test.fail("Volume pool isn't correct")
    # vol-name: reverse lookup from path to volume name
    ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
    libvirt.check_exit_status(ret)
    if vol_name not in ret.stdout.strip():
        test.fail("Volume name isn't correct")
    # vol-resize
    ret = virsh.vol_resize(vol_name, "2G", pool_name)
    libvirt.check_exit_status(ret)
    # Not supported operation
    # Each of the following is allowed to fail as long as the error
    # matches one of the patterns in unsupported_err (check_result skips
    # the test on a match instead of failing it).
    # vol-clone
    ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-create-from
    volxml = vol_xml.VolXML()
    vol_params.update({"name": "%s" % create_from_cloned_volume})
    v_xml = volxml.new_vol(**vol_params)
    v_xml.xmltreefile.write()
    ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-wipe
    ret = virsh.vol_wipe(vol_name, pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-upload: uploads the guest's first disk image into the volume
    ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                           "--pool %s" % pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
    # vol-download
    ret = virsh.vol_download(vol_name, cloned_vol_name,
                             "--pool %s" % pool_name)
    libvirt.check_result(ret, skip_if=unsupported_err)
def get_img_path(output):
    """
    Get the full path of the converted image.

    :param output: conversion output string, used only for the
                   rhev/vdsm modes to extract the storage UUIDs.
    :return: path of the converted image, or "" when ``output_mode``
             matches none of the known modes.

    NOTE(review): relies on names from the enclosing scope — vm_name,
    output_mode, output_storage, mnt_point, get_all_uuids.
    """
    # Initialize so an unknown output_mode returns "" instead of raising
    # UnboundLocalError (matches the companion variant of this helper).
    img_path = ""
    img_name = vm_name + "-sda"
    if output_mode == "libvirt":
        # Image is a libvirt volume; resolve its path via virsh vol-path.
        img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
    elif output_mode == "local":
        img_path = os.path.join(output_storage, img_name)
    elif output_mode in ["rhev", "vdsm"]:
        # Image lives under the mounted export domain:
        # <mnt_point>/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>
        export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
        img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                image_uuid, vol_uuid)
    return img_path
def get_img_path(output):
    """
    Get the full path of the converted image and verify it exists.

    :param output: conversion output string, used only for the
                   rhev/vdsm modes to extract the storage UUIDs.
    :return: validated path of the converted image.
    :raise error.TestError: if no path could be determined or the path
                            is not a regular file.

    NOTE(review): relies on names from the enclosing scope — vm_name,
    output_mode, output_storage, mnt_point, get_all_uuids.
    """
    img_path = ""
    img_name = vm_name + "-sda"
    if output_mode == "libvirt":
        # Image is a libvirt volume; resolve its path via virsh vol-path.
        img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
    elif output_mode == "local":
        img_path = os.path.join(output_storage, img_name)
    elif output_mode in ["rhev", "vdsm"]:
        # Image lives under the mounted export domain:
        # <mnt_point>/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>
        export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
        img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                image_uuid, vol_uuid)
    if not img_path or not os.path.isfile(img_path):
        # Bug fix: the original passed img_path as a second positional
        # argument to TestError ("...", img_path), which never formatted
        # the message; use %-formatting so the bad path appears in it.
        raise error.TestError("Get image path: '%s' is invalid" % img_path)
    return img_path
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.

    :param test: test object (provides fail/error/cancel).
    :param params: test parameter dict.
    :param env: test environment object (unused here).
    """
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    # A bare target name is placed under the tmp dir.
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    # LUKS volumes are wiped in place (the source, not the clone).
    wipe_old_vol = False

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            # NOTE(review): placeholder value appears redacted in source —
            # presumably a concrete default user name; confirm upstream.
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]
    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name),
                    encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            # Disk pools expose partitions as volumes; pick the existing one.
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool (seek past the end, write nothing)
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not support for block volume
        if vol_info["Type"] == "block" and \
                clone_option.count("prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        # Bug fix: report the wipe command's own stderr;
                        # the original printed clone_result.stdout here.
                        test.fail("Wipe volume fail:\n%s" %
                                  wipe_result.stderr.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                        # vol-wipe rewrites the payload; the image must
                        # end up as raw data.
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume "
                                      "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    # Bug fix: same as above — use wipe_result, not
                    # clone_result, in the failure message.
                    test.fail("Wipe volume fail:\n%s" %
                              wipe_result.stderr.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume "
                                  "format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run"
                          " successfully.")

        if bad_cloned_vol_name:
            # Negative case: the clone must have been rejected for the
            # expected reason ('/' in the name).
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.

    :param test: test object (provides fail/cancel).
    :param params: test parameter dict.
    :param env: test environment object, used to fetch the VM.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            test.cancel("%s value '%s' is not a number." %
                        (i, params.get(i)))
    # False positive - img_val was filled in the for loop above.
    # pylint: disable=E0632
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vm_os_xml = vmxml.os

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        label = check_ownership(disk_path)
        if label:
            backup_labels_of_disks[disk_path] = label

    try:
        if vm_os_xml.nvram:
            nvram_path = vm_os_xml.nvram
            if not os.path.exists(nvram_path):
                # Need libvirt automatically generate the path
                vm.start()
                vm.destroy(gracefully=False)
            label = check_ownership(nvram_path)
            if label:
                backup_labels_of_disks[nvram_path] = label
    except xcepts.LibvirtXMLNotFoundError:
        logging.debug("vm xml don't have nvram element")

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk to qemu:qemu to avoid fail on local disk
        for file_path in list(backup_labels_of_disks.keys()):
            if qemu_user == "root":
                os.chown(file_path, 0, 0)
            elif qemu_user == "qemu":
                # 107:107 is the conventional qemu uid/gid on RH hosts.
                os.chown(file_path, 107, 107)
            else:
                process.run('chown %s %s' % (qemu_user, file_path),
                            shell=True)

        # Set selinux of host.
        if backup_sestatus == "disabled":
            # Fixed spacing of the concatenated message fragments.
            test.cancel("SELinux is in Disabled mode. "
                        "It must be Enabled to "
                        "run this test")
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        # NOTE(review): group is set from qemu_user while the qemu_group
        # param is fetched but never used — looks like it should be
        # qemu_group; kept as-is to preserve behavior, confirm upstream.
        qemu_conf.group = qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s", qemu_conf)
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s", export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                             shell=True)
        if result.exit_status:
            test.cancel("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create a image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            # Re-create on top of a backing file when requested.
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            test.cancel("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            test.fail("Failed to attach disk %s to VM. "
                      "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s", img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s", img_label_after)
            if status_error:
                test.fail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case. "
                          "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            # Snapshot names are numeric timestamps; raw string for the
            # regex (was an un-raw "\d+").
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in list(disks_snap.values()):
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except process.CmdError:
            test.fail("Detach disk 'vdf' from VM %s failed." % vm.name)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        # Restore the original uid:gid of every disk we chowned above.
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        if snapshot_name:
            backup_xml.sync("--snapshots-metadata")
        else:
            backup_xml.sync()
        for i in disk_snap_path:
            if i and os.path.exists(i):
                os.unlink(i)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            # Bug fix: the original had "except test.fail as detail" —
            # test.fail is a method, not an exception class, so a cleanup
            # failure raised TypeError and masked the real error. Catch
            # broadly: cleanup is best-effort and the failure is logged.
            except Exception as detail:
                logging.error(str(detail))
        utils_selinux.set_status(backup_sestatus)
        libvirtd.restart()
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.

    :param test: test object (provides fail/cancel).
    :param params: test parameter dict.
    :param env: test environment object, used to fetch the VM.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {'model': sec_model, 'label': img_label,
                     'relabel': sec_relabel}
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            # NOTE(review): this user value appears redacted in source —
            # presumably 'root' (group is 'root'); confirm upstream.
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         image_size="1G", pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in libvirt,
                # logical pool could use libvirt to create volume but volume
                # format is not supported and will be 'raw' as default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            # Resolve the volume to a host path for the disk XML source.
            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                # Block-backed volumes attach as <source dev=...>.
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"
                # set host_sestatus as nfs pool will reset it
                utils_selinux.set_status(host_sestatus)
                # set virt_use_nfs
                result = process.run("setsebool virt_use_nfs %s" %
                                     virt_use_nfs, shell=True)
                if result.exit_status:
                    test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            # Attach a per-disk <seclabel> element to the source.
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {source_type: img_path},
                   "seclabels": source_seclabel})
        else:
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {source_type: img_path}})
        # Set the context of the VM.
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')
            if check_cap_rawio:
                # Parse CapPrm/CapEff/CapBnd hex masks from the qemu
                # process status file.
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # bit and with rawio capabilitiy value to check cap_sys_rawio
                # is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (
                            i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (
                            i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                # Verify the SELinux label on the block device, entering
                # the qemu mount namespace when libvirt uses namespaces.
                if libvirt_version.version_compare(3, 1, 0) and \
                        enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            # NOTE(review): image is only bound in the non-pool branch;
            # if an exception fired before its creation this would raise
            # NameError — confirm whether that path is reachable.
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type,
                                 pool_target, emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
def check_vol(expected, avail=True):
    """
    Checks the expected volume details with actual volume details from
    vol-dumpxml
    vol-list
    vol-info
    vol-key
    vol-path
    qemu-img info

    :param expected: dict of expected volume attributes (name, pool_name,
                     path, type, format, capacity, encrypt_format,
                     encrypt_secret).
    :param avail: True if the volume is expected to exist, False if it is
                  expected to have been deleted.
    :return: number of mismatches found (0 means all checks passed).

    NOTE(review): relies on names from the enclosing module scope —
    get_vol_list, norm_capacity, params.
    """
    error_count = 0

    pv = libvirt_storage.PoolVolume(expected['pool_name'])
    vol_exists = pv.volume_exists(expected['name'])
    # Existence check short-circuits: nothing further to compare when the
    # volume's presence already contradicts the expectation.
    if vol_exists:
        if not avail:
            error_count += 1
            logging.error("Expect volume %s not exists but find it",
                          expected['name'])
            return error_count
    else:
        if avail:
            error_count += 1
            logging.error("Expect volume %s exists but not find it",
                          expected['name'])
            return error_count
        else:
            logging.info("Volume %s checked successfully for deletion",
                         expected['name'])
            return error_count

    actual_list = get_vol_list(expected['pool_name'], expected['name'])
    actual_info = pv.volume_info(expected['name'])
    # Get values from vol-dumpxml
    volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                     expected['pool_name'])

    # Check against virsh vol-key
    vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
    if vol_key.stdout.strip() != volume_xml.key:
        logging.error("Volume key is mismatch \n%s"
                      "Key from xml: %s\nKey from command: %s",
                      expected['name'], volume_xml.key, vol_key)
        error_count += 1
    else:
        logging.debug("virsh vol-key for volume: %s successfully"
                      " checked against vol-dumpxml", expected['name'])

    # Check against virsh vol-name
    get_vol_name = virsh.vol_name(expected['path'])
    if get_vol_name.stdout.strip() != expected['name']:
        logging.error("Volume name mismatch\n"
                      "Expected name: %s\nOutput of vol-name: %s",
                      expected['name'], get_vol_name)
        # Bug fix: the original logged this mismatch but never counted it,
        # so a vol-name failure could not fail the caller's check.
        error_count += 1

    # Check against virsh vol-path
    vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
    if expected['path'] != vol_path.stdout.strip():
        logging.error("Volume path mismatch for volume: %s\n"
                      "Expected path: %s\nOutput of vol-path: %s\n",
                      expected['name'], expected['path'], vol_path)
        error_count += 1
    else:
        logging.debug("virsh vol-path for volume: %s successfully checked"
                      " against created volume path", expected['name'])

    # Check path against virsh vol-list
    if expected['path'] != actual_list['path']:
        logging.error("Volume path mismatch for volume:%s\n"
                      "Expected Path: %s\nPath from virsh vol-list: %s",
                      expected['name'], expected['path'],
                      actual_list['path'])
        error_count += 1
    else:
        logging.debug("Path of volume: %s from virsh vol-list "
                      "successfully checked against created "
                      "volume path", expected['name'])

    # Check path against virsh vol-dumpxml
    if expected['path'] != volume_xml.path:
        logging.error("Volume path mismatch for volume: %s\n"
                      "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                      expected['name'], expected['path'], volume_xml.path)
        error_count += 1
    else:
        logging.debug("Path of volume: %s from virsh vol-dumpxml "
                      "successfully checked against created volume path",
                      expected['name'])

    # Check type against virsh vol-list
    if expected['type'] != actual_list['type']:
        logging.error("Volume type mismatch for volume: %s\n"
                      "Expected Type: %s\n Type from vol-list: %s",
                      expected['name'], expected['type'],
                      actual_list['type'])
        error_count += 1
    else:
        logging.debug("Type of volume: %s from virsh vol-list "
                      "successfully checked against the created "
                      "volume type", expected['name'])

    # Check type against virsh vol-info
    if expected['type'] != actual_info['Type']:
        logging.error("Volume type mismatch for volume: %s\n"
                      "Expected Type: %s\n Type from vol-info: %s",
                      expected['name'], expected['type'],
                      actual_info['Type'])
        error_count += 1
    else:
        logging.debug("Type of volume: %s from virsh vol-info successfully"
                      " checked against the created volume type",
                      expected['name'])

    # Check name against virsh vol-info
    if expected['name'] != actual_info['Name']:
        logging.error("Volume name mismatch for volume: %s\n"
                      "Expected name: %s\n Name from vol-info: %s",
                      expected['name'], expected['name'],
                      actual_info['Name'])
        error_count += 1
    else:
        logging.debug("Name of volume: %s from virsh vol-info successfully"
                      " checked against the created volume name",
                      expected['name'])

    # Check format from against qemu-img info
    img_info = utils_misc.get_image_info(expected['path'])
    if expected['format']:
        if expected['format'] != img_info['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n"
                          "Format from qemu-img info: %s",
                          expected['name'], expected['format'],
                          img_info['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from qemu-img info "
                          "checked successfully against the created "
                          "volume format", expected['name'])

    # Check format against vol-dumpxml
    if expected['format']:
        if expected['format'] != volume_xml.format:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n"
                          "Format from vol-dumpxml: %s",
                          expected['name'], expected['format'],
                          volume_xml.format)
            error_count += 1
        else:
            logging.debug("Format of volume: %s from virsh vol-dumpxml "
                          "checked successfully against the created"
                          " volume format", expected['name'])

    logging.info(expected['encrypt_format'])
    # Check encrypt against vol-dumpxml
    if expected['encrypt_format']:
        # As the 'default' format will change to specific valut(qcow), so
        # just output it here
        logging.debug("Encryption format of volume '%s' is: %s",
                      expected['name'], volume_xml.encryption.format)
        # And also output encryption secret uuid
        secret_uuid = volume_xml.encryption.secret['uuid']
        logging.debug("Encryption secret of volume '%s' is: %s",
                      expected['name'], secret_uuid)
        if expected['encrypt_secret']:
            if expected['encrypt_secret'] != secret_uuid:
                logging.error("Encryption secret mismatch for volume: %s\n"
                              "Expected secret uuid: %s\n"
                              "Secret uuid from vol-dumpxml: %s",
                              expected['name'], expected['encrypt_secret'],
                              secret_uuid)
                error_count += 1
        else:
            # If no set encryption secret value, automatically
            # generate a secret value at the time of volume creation
            logging.debug("Volume encryption secret is %s", secret_uuid)

    # Check pool name against vol-pool
    vol_pool = virsh.vol_pool(expected['path'])
    if expected['pool_name'] != vol_pool.stdout.strip():
        logging.error("Pool name mismatch for volume: %s against"
                      "virsh vol-pool", expected['name'])
        error_count += 1
    else:
        logging.debug("Pool name of volume: %s checked successfully"
                      " against the virsh vol-pool", expected['name'])

    norm_cap = {}
    capacity = {}
    capacity['list'] = actual_list['capacity']
    capacity['info'] = actual_info['Capacity']
    capacity['xml'] = volume_xml.capacity
    capacity['qemu_img'] = img_info['vsize']
    # Normalize the four capacity reports to a common unit before diffing.
    norm_cap = norm_capacity(capacity)
    # Bug fix: params.get returns a string; comparing "int > str" raises
    # TypeError in Python 3, so cast the tolerance to int.
    delta_size = int(params.get('delta_size', "1024"))
    if abs(expected['capacity'] - norm_cap['list']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-list\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['list'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-list for volume %s", expected['name'])

    if abs(expected['capacity'] - norm_cap['info']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-info\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['info'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-info for volume %s", expected['name'])

    if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-dumpxml\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['xml'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-dumpxml for volume: %s",
                      expected['name'])

    if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against "
                      "qemu-img info\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['qemu_img'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " qemu-img info for volume: %s", expected['name'])
    return error_count
def run(test, params, env):
    """
    Test virsh vol-download and vol-upload commands.

    Basic steps are:
    1. Create pool with type defined in cfg.
    2. Create image with data written in it.
    3. Get md5 value before operation.
    4. Do vol-download/upload with options (offset, length).
    5. Check md5 value after operation.

    :param test: avocado test object (fail/error/cancel reporting).
    :param params: test parameter dictionary.
    :param env: test environment; provides the VM under test.
    """
    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    # BUG FIX: the original used `is ""`, which compares object identity
    # with a string literal and is not guaranteed to work; test emptiness.
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol",
                                      "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support",
                                                "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")

    # Destroy VM: the disk image is manipulated directly below, so the
    # guest must not be running while we do it.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file for restoration in the finally block.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume.
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # Update polkit rule as the volume name changed.
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)
        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others.
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]
        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options.
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0
        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to file.
            write_file(file_path)

            # Set length for calculating offset + length in the following
            # func get_pre_post_digest() and digest().
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digest if have offset and
                length.

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset.
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length.
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)
                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare.
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)
            # Do volume upload.
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation.
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after.
                if ori_pre_digest == aft_pre_digest and \
                        ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    # BUG FIX: interpolate 'operation' with % (test.fail
                    # takes a single message) and keep a space between the
                    # concatenated string fragments.
                    test.fail("file pre or post digests do not "
                              "match, in %s" % operation)

        if operation == "download":
            # Write data to volume.
            write_file(vol_path)

            # Record the digest value before operation.
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)
            process.run("touch %s" % file_path, ignore_status=True,
                        shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)
            # Do volume download.
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation.
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s" % (operation,
                                                     result.stderr))
            # Compare the changed part on volume and file.
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed",
                             operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)
            # Create one disk xml and attach it to VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw',
                                          'file', 'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()
            # Write 100M data into disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024
            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Download volume to local with sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                              download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path,
                                        options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)
            # Check download image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
                    download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))
            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')
            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Do volume upload, upload sparse file which downloaded last
            # time.
            # NOTE(review): the upload result is not checked with
            # check_exit_status here (only the image size below is) —
            # presumably intentional best-effort; confirm before changing.
            result = virsh.vol_upload(upload_sparse_file, download_file_path,
                                      options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
                    upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.

    :param test: avocado test object (fail/error/cancel reporting).
    :param params: test parameter dictionary.
    :param env: test environment object.
    """
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata "
                        "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero needs scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly.
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool.
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume.
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and not libvirt_version.version_compare(
                        6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name),
                    encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool.
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool.
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key,
                          vol_info[key])

        # Metadata preallocation is not supported for block volume.
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True

        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # Update polkit rule as the volume name changed.
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        def wipe_and_verify(target_vol):
            """
            Wipe 'target_vol' with the chosen algorithm and verify the
            outcome against wipe_status_error.

            This helper factors out the wipe logic that was previously
            duplicated verbatim for the cloned and the original volume.
            """
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(target_vol, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    # BUG FIX: report the failing wipe command's stderr,
                    # not the (successful) clone command's stdout.
                    test.fail("Wipe volume fail:\n%s"
                              % wipe_result.stderr.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(target_vol)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(target_vol,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                    # A wiped volume should be left as raw.
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume "
                                  "format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run"
                          " successfully.")

        # Clone volume.
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s"
                          % clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume.
                wipe_and_verify(new_vol_name)
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume.
            wipe_and_verify(vol_name)

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up.
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    # BUG FIX: --prealloc-metadata is a vol-clone option, so probe the
    # vol-clone help text; the original probed vol-wipe by mistake.
    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata "
                                    "is not supported.")

    # Using algorithms other than zero needs scrub installed.
    try:
        utils_misc.find_command('scrub')
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly.
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    del_pool = True
    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # BUG FIX: the original used 'while ... else:', which spins forever
        # when the pool exists (the loop body never changes the condition);
        # a plain if/else is what was intended.
        if libv_pool.pool_exists(pool_name):
            logging.debug("Use exist pool '%s'", pool_name)
            del_pool = False
        else:
            # Create a new pool.
            disk_vol = []
            if pool_type == 'disk':
                disk_vol.append(params.get("pre_vol", '10M'))
            libv_pvt.pre_pool(pool_name=pool_name,
                              pool_type=pool_type,
                              pool_target=pool_target,
                              emulated_image=emulated_image,
                              image_size=emulated_image_size,
                              pre_disk_vol=disk_vol)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        if libv_vol.volume_exists(vol_name):
            logging.debug("Use exist volume '%s'", vol_name)
        elif vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            # Create a new volume.
            libv_pvt.pre_vol(vol_name=vol_name,
                             vol_format=vol_format,
                             capacity=vol_capability,
                             allocation=None,
                             pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = libv_vol.list_volumes().keys()[0]
            # Use lazy %-args rather than eager interpolation in logging.
            logging.debug("Partition %s in disk pool is volume", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool.
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            utils.run(cmd)
        else:
            raise error.TestError("Unknown volume format %s" % vol_format)
        # Refresh the pool.
        virsh.pool_refresh(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key,
                          vol_info[key])
        # Metadata preallocation is not supported for block volume.
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True
        if pool_type == "disk":
            new_vol_name = libvirt.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # Update polkit rule as the volume name changed.
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                libvirt.update_polkit_rule(params, vol_pat, new_value)

        # Clone volume.
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s"
                                     % clone_result.stderr.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume.
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            raise error.TestNAError(wipe_result.stderr)
                        # BUG FIX: report the failing wipe command's stderr,
                        # not the (successful) clone command's stdout.
                        raise error.TestFail("Wipe volume fail:\n%s"
                                             % wipe_result.stderr.strip())
                    else:
                        virsh_vol_info = libv_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): "
                                          "%s = %s", key,
                                          virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): "
                                          "%s = %s", key,
                                          qemu_vol_info[key])
                        # A wiped volume should be left as raw.
                        if qemu_vol_info['format'] != 'raw':
                            raise error.TestFail("Expect wiped volume "
                                                 "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run"
                                         " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run"
                                 " successfully.")
    finally:
        # Clean up.
        try:
            if del_pool:
                libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                      emulated_image)
            else:
                # Only delete the volumes.
                libv_vol = libvirt_storage.PoolVolume(pool_name)
                for vol in [vol_name, new_vol_name]:
                    libv_vol.delete_volume(vol)
        # 'as' form is valid on both Python 2.6+ and 3; the original used
        # the Python-2-only 'except E, detail' syntax.
        except error.TestFail as detail:
            logging.error(str(detail))
def run(test, params, env):
    """
    Test virsh vol-download and vol-upload commands.

    Basic steps are:
    1. Create pool with type defined in cfg.
    2. Create image with data written in it.
    3. Get md5 value before operation.
    4. Do vol-download/upload with options (offset, length).
    5. Check md5 value after operation.
    """
    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    # BUG FIX: the original used `is ""`, which compares object identity
    # with a string literal and is not guaranteed to work; use equality.
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(test.tmpdir, file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol",
                                      "yes"))

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        if create_vol:
            pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)
        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others.
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]
        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options.
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0
        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to file.
            write_file(file_path)

            # Set length for calculating offset + length in the following
            # func get_pre_post_digest() and digest().
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digest if have offset and
                length.

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset.
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length.
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)
                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare.
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)
            if params.get('setup_libvirt_polkit') == 'yes':
                utils.run("chmod 666 %s" % file_path)
            # Do volume upload.
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation.
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after.
                if ori_pre_digest == aft_pre_digest and \
                        ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    # BUG FIX: interpolate 'operation' with % instead of
                    # passing it as a stray exception argument, and keep a
                    # space between the concatenated string fragments.
                    raise error.TestFail("file pre or post digests do not "
                                         "match, in %s" % operation)

        if operation == "download":
            # Write data to volume.
            if pool_type == "disk":
                utils.run("mkfs.ext3 -F %s" % vol_path)
            write_file(vol_path)

            # Record the digest value before operation.
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)
            if params.get('setup_libvirt_polkit') == 'yes':
                utils.run("chmod 666 %s" % file_path)
            # Do volume download.
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation.
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            raise error.TestFail("Fail to %s volume: %s"
                                 % (operation, result.stderr))

        # Compare the changed part on volume and file.
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s succeed", operation)
        else:
            # BUG FIX: interpolate 'operation' with % instead of passing it
            # as a stray exception argument.
            raise error.TestFail("file digests do not match, volume %s "
                                 "failed" % operation)
    finally:
        utlv.PoolVolumeTest(test, params).cleanup_pool(pool_name,
                                                       pool_type,
                                                       pool_target,
                                                       "volumetest")
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create a image by specifice format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). cleanup.

    :param test: avocado test object (provides fail/cancel).
    :param params: test parameter dictionary.
    :param env: test environment object holding the VM.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    # brick_path is derived from the defaulted pool name; note the
    # re-assignment of pool_name (without default) a few lines below.
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    # NOTE(review): this overwrites the defaulted pool_name above with the
    # raw param (possibly None); kept as-is since brick_path already
    # captured the defaulted value.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash")

    # Set volume xml attribute dictionary, extract all params start with
    # 'vol_' which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in list(params.keys()):
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            # Fixed: report the checked value (pool_type), not pool_target.
            test.cancel("%s not in support list %s" %
                        (pool_type, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            test.cancel("QED support changed, check bug: "
                        "https://bugzilla.redhat.com/show_bug.cgi"
                        "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in
                # libvirt, logical pool could use libvirt to create volume
                # but volume format is not supported and will be 'raw' as
                # default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = process.run(cmd, ignore_status=True,
                                             shell=True)
                    if cmd_result.exit_status:
                        test.cancel("Failed to format volume, %s" %
                                    cmd_result.stdout_text.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = process.run("qemu-img info %s" % img_path, shell=True)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name, source=img_path,
                                       target="vdf", extra=extra,
                                       debug=True)
            if result.exit_status:
                test.cancel("Failed to attach disk %s to VM."
                            "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disk such as 'cdrom'.
            # Fixed: build a filtered list instead of calling remove()
            # while iterating, which skipped the element following each
            # removed one (e.g. two consecutive cdroms).
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update
                # it here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs and
                          disk_xml.type_name == 'block'):
                        # Use local file as external snapshot target for
                        # block type. As block device will be treat as raw
                        # format by default, it's not fit for external disk
                        # snapshot target. A work around solution is use
                        # qemu-img again with the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with "
                                 "this QEMU binary", out_err):
                        test.cancel(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active
                        # snapshot without memory state is rejected. Handle
                        # it as SKIP for now. This could be supportted in
                        # future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            test.cancel("Check Bug #1083345, %s" % out_err)

                    test.fail("Failed to create snapshot. Error:%s."
                              % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            # The snapshot name produced by virsh is numeric; raw string
            # avoids the invalid-escape warning for \d on Python 3.
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name,
                                                              snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                # Fixed: leading space so the flags do not fuse with any
                # pre-existing snapshot_options content.
                options += " --redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options, debug=True)
                if snapshot_result.exit_status:
                    test.fail("Failed to create snapshot --current."
                              "Error:%s." %
                              snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                test.fail("Success to create snapshot in negative"
                          " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            test.fail("'%s' run failed with '%s'" %
                      (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, let's only do revert
        # with internal, and move the all skip external cases back to pass.
        # After external also supported, just move the following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an
                # error "revert to external disk snapshot not supported yet"
                # or "revert to external snapshot not supported yet" since
                # d410e6f. Thus, let's check for that and handle as a SKIP
                # for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        r"revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    test.cancel(revert_result.stderr.strip())
                else:
                    test.fail("Revert snapshot failed. %s" %
                              revert_result.stderr.strip())

            if vm.is_dead():
                test.fail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    test.fail("Revert command successed, but VM is not "
                              "paused after reverting with --paused"
                              " option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            # The file was written after snapshot creation, so a successful
            # revert makes the cat fail (file gone).
            if not status:
                test.fail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test delete snapshot without "--metadata", delete external disk
        # snapshot will fail for now.
        # Only do this when snapshot creat succeed which filtered in cfg
        # file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        test.fail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s missing"
                                      % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        test.fail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s still" %
                                      snap_xml_path + " exist")

    finally:
        # Best-effort cleanup: tear down the VM, the attached disk, any
        # snapshot metadata and external snapshot files, then restore the
        # original domain XML.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, source_name=vol_name)
            except exceptions.TestFail as detail:
                # Pool cleanup failure should not mask the test result;
                # restart libvirtd and log the problem instead.
                libvirtd.restart()
                logging.error(str(detail))
def run(test, params, env): """ Test virsh vol-create command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: raise error.TestNAError("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: raise error.TestNAError("It does not support generate target " "path in thi libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: raise error.TestNAError("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in 
current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def post_process_vol(ori_vol_path): """ Create or disactive a volume without libvirt :param ori_vol_path: Full path of an original volume :retur: Volume name for checking """ process_vol_name = params.get("process_vol_name", "process_vol") process_vol_options = params.get("process_vol_options", "") process_vol_capacity = params.get("process_vol_capacity", vol_capacity) process_vol_cmd = "" unsupport_err = "Unsupport do '%s %s' in this test" % ( process_vol_by, process_vol_type) if process_vol_by == "lvcreate": process_vol_cmd = "lvcreate -L %s " % process_vol_capacity if process_vol_type == "thin": if not process_vol_options: process_vol_options = "-T " process_vol_cmd += "%s " % process_vol_options processthin_pool_name = params.get("processthin_pool_name", "thinpool") processthin_vol_name = params.get("processthin_vol_name", "thinvol") process_vol_capacity = params.get("process_vol_capacity", "1G") os.path.dirname(ori_vol_path) process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path), processthin_pool_name) process_vol_cmd += "-V %s " % process_vol_capacity process_vol_cmd += "-n %s " % 
processthin_vol_name process_vol_name = processthin_vol_name elif process_vol_type == "snapshot": if not process_vol_options: process_vol_options = "-s " process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "-n %s " % process_vol_name process_vol_cmd += "%s " % (ori_vol_path) else: logging.error(unsupport_err) return elif process_vol_by == "qemu-img" and process_vol_type == "create": process_vol_cmd = "qemu-img create " process_vol_path = os.path.dirname(ori_vol_path) + "/" process_vol_path += process_vol_name process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "%s " % process_vol_path process_vol_cmd += "%s " % process_vol_capacity elif process_vol_by == "lvchange" and process_vol_type == "deactivate": process_vol_cmd = "lvchange %s " % ori_vol_path if not process_vol_options: process_vol_options = "-an" process_vol_cmd += process_vol_options else: logging.error(unsupport_err) return rst = utils.run(process_vol_cmd, ignore_status=True) if rst.exit_status: if "Snapshots of snapshots are not supported" in rst.stderr: logging.debug("%s is already a snapshot volume", ori_vol_path) process_vol_name = os.path.basename(ori_vol_path) else: logging.error(rst.stderr) return return process_vol_name def check_vol(pool_name, vol_name, expect_exist=True): """ Check volume vol_name in pool pool_name """ src_volumes = src_pv.list_volumes().keys() logging.debug("Current volumes in %s: %s", pool_name, src_volumes) if expect_exist: if vol_name not in src_volumes: raise error.TestFail("Can't find volume %s in pool %s" % (vol_name, pool_name)) # check format in volume xml post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name) logging.debug("Volume %s XML: %s" % (vol_name, post_xml.xmltreefile)) if 'format' in post_xml.keys() and vol_format is not None: if post_xml.format != vol_format: raise error.TestFail("Volume format %s is not expected" % vol_format + " as defined.") else: if vol_name in src_volumes: raise error.TestFail( "Find volume %s in pool %s, 
but expect not" % (vol_name, pool_name)) fmt_err0 = "Unknown file format '%s'" % vol_format fmt_err1 = "Formatting or formatting option not " fmt_err1 += "supported for file format '%s'" % vol_format fmt_err2 = "Driver '%s' does not support " % vol_format fmt_err2 += "image creation" fmt_err_list = [fmt_err0, fmt_err1, fmt_err2] skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format vol_path_list = [] try: # Create the src pool src_pool_name = "virt-%s-pool" % src_pool_type pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image, image_size=image_size, source_format=src_pool_format) src_pv = libvirt_storage.PoolVolume(src_pool_name) # Print current pools for debugging logging.debug("Current pools:%s", libvirt_storage.StoragePool().list_pools()) # Create volumes by virsh in a loop while pool_vol_num > 0: # Set volume xml file vol_name = prefix_vol_name + "_%s" % pool_vol_num pool_vol_num -= 1 if by_xml: # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: raise error.TestError("Fail to generate volume name") vol_arg['name'] = vol_name volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] if params.get('setup_libvirt_polkit') == 'yes': utils.run("chmod 666 %s" % vol_xml, ignore_status=True) # Run virsh_vol_create to create vol logging.debug("Create volume from XML: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol cmd_result = virsh.vol_create_as( vol_name, src_pool_name, vol_capacity, vol_allocation, vol_format, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) # Check result try: utlv.check_exit_status(cmd_result, 
status_error) check_vol(src_pool_name, vol_name, not status_error) if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (error.CmdError, error.TestFail), e: if process_vol_type == "thin": logging.error(e) raise error.TestNAError( "You may encounter bug BZ#1060287") else: raise e else: raise error.TestFail("Post process volume failed")
cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol cmd_result = virsh.vol_create_as( vol_name, src_pool_name, vol_capacity, vol_allocation, vol_format, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) # Check result try: utlv.check_exit_status(cmd_result, status_error) check_vol(src_pool_name, vol_name, not status_error) if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None:
def run(test, params, env): """ Test svirt in adding disk to VM. (1).Init variables for test. (2).Create a image to attached to VM. (3).Attach disk. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing") # Get variables about seclabel for VM. sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic") sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux") sec_label = params.get("svirt_attach_disk_vm_sec_label", None) sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes") sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label, 'relabel': sec_relabel} # Get variables about pool vol with_pool_vol = 'yes' == params.get("with_pool_vol", "no") check_cap_rawio = "yes" == params.get("check_cap_rawio", "no") virt_use_nfs = params.get("virt_use_nfs", "off") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Get varialbles about image. img_label = params.get('svirt_attach_disk_disk_label') img_name = "svirt_disk" # Default label for the other disks. # To ensure VM is able to access other disks. default_label = params.get('svirt_attach_disk_disk_default_label', None) # Set selinux of host. backup_sestatus = utils_selinux.get_status() utils_selinux.set_status(host_sestatus) # Set the default label to other disks of vm. 
disks = vm.get_disk_devices() for disk in disks.values(): utils_selinux.set_context_of_file(filename=disk['source'], context=default_label) pvt = None qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # set qemu conf if check_cap_rawio: qemu_conf.user = '******' qemu_conf.group = 'root' logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() # Set the context of the VM. vmxml.set_seclabel([sec_dict]) vmxml.sync() logging.debug("the domain xml is: %s" % vmxml.xmltreefile) if with_pool_vol: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) logging.debug("pool_type %s" % pool_type) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"]) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. pv = libvirt_storage.PoolVolume(pool_name) vols = pv.list_volumes().keys() if vols: vol_name = vols[0] else: raise error.TestNAError("No volume in pool: %s" % pool_name) else: vol_arg = {'name': vol_name, 'format': vol_format, 'capacity': 1073741824, 'allocation': 1048576, } # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["iscsi", "disk"]: extra = "--driver qemu --type lun --rawio --persistent" else: extra = "--persistent --subdriver qcow2" # set 
host_sestatus as nfs pool will reset it utils_selinux.set_status(host_sestatus) # set virt_use_nfs result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs) if result.exit_status: raise error.TestNAError("Failed to set virt_use_nfs value") else: # Init a QemuImg instance. params['image_name'] = img_name tmp_dir = data_dir.get_tmp_dir() image = qemu_storage.QemuImg(params, tmp_dir, img_name) # Create a image. img_path, result = image.create(params) # Set the context of the image. utils_selinux.set_context_of_file(filename=img_path, context=img_label) extra = "--persistent" # Do the attach action. result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestFail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. # VM with set seclabel can access the image with the # set context. if status_error: raise error.TestFail('Test succeeded in negative case.') if check_cap_rawio: cap_list = ['CapPrm', 'CapEff', 'CapBnd'] cap_dict = {} pid = vm.get_pid() pid_status_path = "/proc/%s/status" % pid with open(pid_status_path) as f: for line in f: val_list = line.split(":") if val_list[0] in cap_list: cap_dict[val_list[0]] = int(val_list[1].strip(), 16) # bit and with rawio capabilitiy value to check cap_sys_rawio # is set cap_rawio_val = 0x0000000000020000 for i in cap_list: if not cap_rawio_val & cap_dict[i]: err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i]) err_msg += " lack cap_sys_rawio capabilities" raise error.TestFail(err_msg) else: inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i]) inf_msg += " have cap_sys_rawio capabilities" logging.debug(inf_msg) except virt_vm.VMStartError, e: # Starting VM failed. # VM with set seclabel can not access the image with the # set context. if not status_error: raise error.TestFail("Test failed in positive case." 
"error: %s" % e) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except error.CmdError: raise error.TestFail("Detach disk 'vdf' from VM %s failed." % vm.name)
def check_vol(expected, avail=True):
    """
    Checks the expected volume details with actual volume details from
    vol-dumpxml
    vol-list
    vol-info
    vol-key
    vol-path
    qemu-img info

    :param expected: dict of expected volume attributes; keys used here:
                     'name', 'pool_name', 'path', 'type', 'format',
                     'capacity'.
    :param avail: True if the volume is expected to exist; False if it is
                  expected to have been deleted.
    :return: number of mismatches found (0 means every check passed).
    """
    error_count = 0
    volume_xml = {}
    (isavail, actual_list) = get_vol_list(expected['pool_name'],
                                          expected['name'])
    actual_info = get_vol_info(expected['pool_name'], expected['name'])
    if not avail:
        # Deletion check: the volume must no longer show up in vol-list.
        if isavail:
            error_count += 1
            logging.error("Deleted vol: %s is still shown in vol-list",
                          expected['name'])
        else:
            logging.info("Volume %s checked successfully for deletion",
                         expected['name'])
        return error_count
    else:
        if not isavail:
            logging.error("Volume list does not show volume %s",
                          expected['name'])
            logging.error("Volume creation failed")
            error_count += 1

        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.get_vol_details_by_name(
            expected['name'], expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml['key']:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\n Key from command: %s",
                          expected['name'], volume_xml['key'], vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\n Output of vol-name: %s",
                          expected['name'], get_vol_name)
            # BUG FIX: this mismatch previously did not bump error_count,
            # so a vol-name failure was logged but never reported to the
            # caller through the return value.
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\n Output of vol-path: %s\n",
                          expected['name'], expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if isavail:
            if expected['path'] != actual_list['path']:
                logging.error("Volume path mismatch for volume:%s\n"
                              "Expected Path: %s\n Path from virsh vol-list: %s",
                              expected['name'], expected['path'],
                              actual_list['path'])
                error_count += 1
            else:
                logging.debug("Path of volume: %s from virsh vol-list "
                              "successfully checked against created "
                              "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml['path']:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\n Path from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'],
                          volume_xml['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if isavail:
            if expected['type'] != actual_list['type']:
                logging.error("Volume type mismatch for volume: %s\n"
                              "Expected Type: %s\n Type from vol-list: %s",
                              expected['name'], expected['type'],
                              actual_list['type'])
                error_count += 1
            else:
                logging.debug("Type of volume: %s from virsh vol-list "
                              "successfully checked against the created "
                              "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'], expected['name'],
                          actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format from against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format'] != img_info['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n Format from qemu-img info: %s",
                          expected['name'], expected['format'],
                          img_info['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from qemu-img info checked "
                          "successfully against the created volume format",
                          expected['name'])

        # Check format against vol-dumpxml
        if expected['format'] != volume_xml['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n Format from vol-dumpxml: %s",
                          expected['name'], expected['format'],
                          volume_xml['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from virsh vol-dumpxml checked"
                          " successfully against the created volume format",
                          expected['name'])

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        # Normalize the capacity reported by each source to a comparable
        # unit before checking, since each tool reports in its own format.
        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml['capacity']
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        if expected['capacity'] != norm_cap['list']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['info']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['xml']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if expected['capacity'] != norm_cap['qemu_img']:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s", expected['name'])
        return error_count
def run(test, params, env): """ This test cover two volume commands: vol-clone and vol-wipe. 1. Create a given type pool. 2. Create a given format volume in the pool. 3. Clone the new create volume. 4. Wipe the new clone volume. 5. Delete the volume and pool. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if not os.path.dirname(pool_target): pool_target = os.path.join(test.tmpdir, pool_target) emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") new_vol_name = params.get("new_vol_name") vol_capability = params.get("vol_capability") vol_format = params.get("vol_format") clone_option = params.get("clone_option", "") wipe_algorithms = params.get("wipe_algorithms") if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None: if "prealloc-metadata" in clone_option: raise error.TestNAError("Option --prealloc-metadata " "is not supported.") # Using algorithms other than zero need scrub installed. 
try: utils_misc.find_command("scrub") except ValueError: logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.") valid_algorithms = ["zero"] else: valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random"] # Choose an algorithms randomly if wipe_algorithms: alg = random.choice(wipe_algorithms.split()) else: alg = random.choice(valid_algorithms) clone_status_error = "yes" == params.get("clone_status_error", "no") wipe_status_error = "yes" == params.get("wipe_status_error", "no") setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get("unprivileged_user") if unpri_user: if unpri_user.count("EXAMPLE"): unpri_user = "******" if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: raise error.TestNAError("API acl test not supported in current" " libvirt version.") del_pool = True libv_pvt = libvirt.PoolVolumeTest(test, params) try: libv_pool = libvirt_storage.StoragePool() while libv_pool.pool_exists(pool_name): logging.debug("Use exist pool '%s'", pool_name) del_pool = False else: # Create a new pool disk_vol = [] if pool_type == "disk": disk_vol.append(params.get("pre_vol", "10M")) libv_pvt.pre_pool( pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, pre_disk_vol=disk_vol, ) libv_vol = libvirt_storage.PoolVolume(pool_name) if libv_vol.volume_exists(vol_name): logging.debug("Use exist volume '%s'", vol_name) elif vol_format in ["raw", "qcow2", "qed", "vmdk"]: # Create a new volume libv_pvt.pre_vol( vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name ) elif vol_format == "partition": vol_name = libv_vol.list_volumes().keys()[0] logging.debug("Partition %s in disk pool is volume" % vol_name) elif vol_format == "sparse": # Create a sparse file in pool 
sparse_file = pool_target + "/" + vol_name cmd = "dd if=/dev/zero of=" + sparse_file cmd += " bs=1 count=0 seek=" + vol_capability utils.run(cmd) else: raise error.TestError("Unknown volume format %s" % vol_format) # Refresh the pool virsh.pool_refresh(pool_name) vol_info = libv_vol.volume_info(vol_name) for key in vol_info: logging.debug("Original volume info: %s = %s", key, vol_info[key]) # Metadata preallocation is not support for block volume if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"): clone_status_error = True if pool_type == "disk": new_vol_name = libvirt.new_disk_vol_name(pool_name) if new_vol_name is None: raise error.TestError("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % new_vol_name libvirt.update_polkit_rule(params, vol_pat, new_value) # Clone volume clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True) if not clone_status_error: if clone_result.exit_status != 0: raise error.TestFail("Clone volume fail:\n%s" % clone_result.stderr.strip()) else: vol_info = libv_vol.volume_info(new_vol_name) for key in vol_info: logging.debug("Cloned volume info: %s = %s", key, vol_info[key]) logging.debug("Clone volume successfully.") # Wipe the new clone volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe( new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True ) unsupported_err = ["Unsupported algorithm", "no such pattern sequence"] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): raise error.TestNAError(wipe_result.stderr) raise error.TestFail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libv_vol.volume_info(new_vol_name) for key in virsh_vol_info: logging.debug("Wiped volume 
info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info["format"] != "raw": raise error.TestFail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: raise error.TestFail("Expect wipe volume fail, but run" " successfully.") elif clone_status_error and clone_result.exit_status == 0: raise error.TestFail("Expect clone volume fail, but run" " successfully.") finally: # Clean up try: if del_pool: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) else: # Only delete the volumes libv_vol = libvirt_storage.PoolVolume(pool_name) for vol in [vol_name, new_vol_name]: libv_vol.delete_volume(vol) except error.TestFail, detail: logging.error(str(detail))
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {'model': sec_model, 'label': img_label,
                     'relabel': sec_relabel}
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    # Save the current status first so it can be restored in the finally
    # block below.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        # Running qemu as root is required for the CAP_SYS_RAWIO check
        # below; libvirtd must be restarted to pick up the config change.
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])
            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in libvirt,
                # logical pool could use libvirt to create volume but volume
                # format is not supported and will be 'raw' as default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    # Reuse the first volume the pool already exposes.
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']
                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")
            # Resolve the volume into a host path used as the disk source.
            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()
            if pool_type in ["iscsi", "disk"]:
                # Block-backed volumes attach as device nodes, not files.
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        # Disable qemu mount namespaces so the device label
                        # is visible directly on the host path.
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"
                # set host_sestatus as nfs pool will reset it
                utils_selinux.set_status(host_sestatus)
                # set virt_use_nfs
                result = process.run("setsebool virt_use_nfs %s" %
                                     virt_use_nfs, shell=True)
                if result.exit_status:
                    test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            # Only label the file manually when libvirt will not relabel it.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)
        # NOTE(review): 'image' is only assigned in the non-pool branch
        # above, but the finally block below calls image.remove() whenever
        # with_pool_vol is False -- if an exception fires before the
        # assignment this raises NameError/UnboundLocalError; confirm.
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            # Attach an explicit <seclabel> to the disk source element.
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{"attrs":
                                                      {source_type: img_path},
                                                      "seclabels":
                                                      source_seclabel})
        else:
            disk_source = disk_xml.new_disk_source(**{"attrs":
                                                      {source_type:
                                                       img_path}})
        # Set the context of the VM.
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()
        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')
            if check_cap_rawio:
                # Read the qemu process capability sets from /proc and
                # verify CAP_SYS_RAWIO (bit 17) is present in each.
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # bit and with rawio capabilitiy value to check cap_sys_rawio
                # is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                # With qemu mount namespaces enabled the label must be read
                # from inside the qemu process's namespace via nsenter.
                if libvirt_version.version_compare(3, 1, 0) and \
                        enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type,
                                 pool_target, emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"] if snapshot_with_pool: if pool_type not in supported_pool_list: raise error.TestNAError("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. 
snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed": raise error.TestNAError("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir tmp_dir = data_dir.get_tmp_dir() snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"]) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. 
pv = libvirt_storage.PoolVolume(pool_name) vols = pv.list_volumes().keys() if vols: vol_name = vols[0] else: raise error.TestNAError("No volume in pool: %s", pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = utils.run(cmd, ignore_status=True) if cmd_result.exit_status: raise error.TestNAError("Failed to format volume, %s" % cmd_result.stdout.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format # Do the attach action. out = utils.run("qemu-img info %s" % img_path) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. 
if snapshot_from_xml: snapshot_name = "snapshot_test" lines = ["<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n"] if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snapshot_external_disk.append(memory_external) lines.append("<memory snapshot=\'%s\' file='%s'/>\n" % (snapshot_memory, memory_external)) else: lines.append("<memory snapshot='%s'/>\n" % snapshot_memory) # Add all disks into xml file. disks = vm.get_disk_devices().values() lines.append("<disks>\n") for disk in disks: lines.append("<disk name='%s' snapshot='%s'>\n" % (disk['source'], snapshot_disk)) if snapshot_disk == "external": snap_path = "%s.snap" % os.path.basename(disk['source']) disk_external = os.path.join(tmp_dir, snap_path) snapshot_external_disk.append(disk_external) lines.append("<source file='%s'/>\n" % disk_external) lines.append("</disk>\n") lines.append("</disks>\n") lines.append("</domainsnapshot>") snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() logging.debug("The xml content for snapshot create is:") with open(snapshot_xml_path, 'r') as fin: logging.debug(fin.read()) options += " --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search("live disk snapshot not supported with this QEMU binary", out_err): raise error.TestNAError(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. Handle it as SKIP # for now. 
This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search("internal snapshot of a running VM" + " must include the memory state", out_err): raise error.TestNAError("Check Bug #1083345, %s" % out_err) raise error.TestFail("Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: lines = ["<domainsnapshot>\n", "<description>Snapshot Test</description>\n", "<state>running</state>\n", "<creationTime>%s</creationTime>" % snapshot_name, "</domainsnapshot>"] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() logging.debug("The xml content for snapshot create is:") with open(snapshot_xml_path, 'r') as fin: logging.debug(fin.read()) options += "--redefine %s --current" % snapshot_xml_path if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." % snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: raise error.TestFail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. 
tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: raise error.TestFail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # As commit d410e6f for libvirt 1.2.3, attempts to revert external # snapshots will FAIL with an error "revert to external snapshot # not supported yet". Thus, let's check for that and handle as a # SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if libvirt_version.version_compare(1, 2, 3): if re.search("revert to external snapshot not supported yet", revert_result.stderr): raise error.TestNAError(revert_result.stderr.strip()) else: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail("Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. 
session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: raise error.TestFail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." raise error.TestFail(err_msg) else: if os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except error.TestFail, detail: logging.error(str(detail))
def run(test, params, env): """ Test virsh vol-create command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: raise error.TestNAError("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: raise error.TestNAError("It does not support generate target " "path in thi libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: raise error.TestNAError("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in 
current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def post_process_vol(ori_vol_path): """ Create or disactive a volume without libvirt :param ori_vol_path: Full path of an original volume :retur: Volume name for checking """ process_vol_name = params.get("process_vol_name", "process_vol") process_vol_options = params.get("process_vol_options", "") process_vol_capacity = params.get("process_vol_capacity", vol_capacity) process_vol_cmd = "" unsupport_err = "Unsupport do '%s %s' in this test" % (process_vol_by, process_vol_type) if process_vol_by == "lvcreate": process_vol_cmd = "lvcreate -L %s " % process_vol_capacity if process_vol_type == "thin": if not process_vol_options: process_vol_options = "-T " process_vol_cmd += "%s " % process_vol_options processthin_pool_name = params.get("processthin_pool_name", "thinpool") processthin_vol_name = params.get("processthin_vol_name", "thinvol") process_vol_capacity = params.get("process_vol_capacity", "1G") os.path.dirname(ori_vol_path) process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path), processthin_pool_name) process_vol_cmd += "-V %s " % process_vol_capacity process_vol_cmd += "-n %s " % 
processthin_vol_name process_vol_name = processthin_vol_name elif process_vol_type == "snapshot": if not process_vol_options: process_vol_options = "-s " process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "-n %s " % process_vol_name process_vol_cmd += "%s " % (ori_vol_path) else: logging.error(unsupport_err) return elif process_vol_by == "qemu-img" and process_vol_type == "create": process_vol_cmd = "qemu-img create " process_vol_path = os.path.dirname(ori_vol_path) + "/" process_vol_path += process_vol_name process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "%s " % process_vol_path process_vol_cmd += "%s " % process_vol_capacity elif process_vol_by == "lvchange" and process_vol_type == "deactivate": process_vol_cmd = "lvchange %s " % ori_vol_path if not process_vol_options: process_vol_options = "-an" process_vol_cmd += process_vol_options else: logging.error(unsupport_err) return rst = process.run(process_vol_cmd, ignore_status=True, shell=True) if rst.exit_status: if "Snapshots of snapshots are not supported" in rst.stderr: logging.debug("%s is already a snapshot volume", ori_vol_path) process_vol_name = os.path.basename(ori_vol_path) else: logging.error(rst.stderr) return return process_vol_name def check_vol(pool_name, vol_name, expect_exist=True): """ Check volume vol_name in pool pool_name """ src_volumes = src_pv.list_volumes().keys() logging.debug("Current volumes in %s: %s", pool_name, src_volumes) if expect_exist: if vol_name not in src_volumes: raise error.TestFail("Can't find volume %s in pool %s" % (vol_name, pool_name)) # check format in volume xml post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name) logging.debug("Volume %s XML: %s" % (vol_name, post_xml.xmltreefile)) if 'format' in post_xml.keys() and vol_format is not None: if post_xml.format != vol_format: raise error.TestFail("Volume format %s is not expected" % vol_format + " as defined.") else: if vol_name in src_volumes: raise error.TestFail("Find volume 
%s in pool %s, but expect not" % (vol_name, pool_name)) fmt_err0 = "Unknown file format '%s'" % vol_format fmt_err1 = "Formatting or formatting option not " fmt_err1 += "supported for file format '%s'" % vol_format fmt_err2 = "Driver '%s' does not support " % vol_format fmt_err2 += "image creation" fmt_err_list = [fmt_err0, fmt_err1, fmt_err2] skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format vol_path_list = [] try: # Create the src pool src_pool_name = "virt-%s-pool" % src_pool_type pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image, image_size=image_size, source_format=src_pool_format) src_pv = libvirt_storage.PoolVolume(src_pool_name) # Print current pools for debugging logging.debug("Current pools:%s", libvirt_storage.StoragePool().list_pools()) # Create volumes by virsh in a loop while pool_vol_num > 0: # Set volume xml file vol_name = prefix_vol_name + "_%s" % pool_vol_num pool_vol_num -= 1 if by_xml: # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: raise error.TestError("Fail to generate volume name") vol_arg['name'] = vol_name volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] if params.get('setup_libvirt_polkit') == 'yes': process.run("chmod 666 %s" % vol_xml, ignore_status=True, shell=True) # Run virsh_vol_create to create vol logging.debug("Create volume from XML: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol cmd_result = virsh.vol_create_as( vol_name, src_pool_name, vol_capacity, vol_allocation, vol_format, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) # Check result try: 
utlv.check_exit_status(cmd_result, status_error) check_vol(src_pool_name, vol_name, not status_error) if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (process.CmdError, error.TestFail), e: if process_vol_type == "thin": logging.error(e) raise error.TestNAError("You may encounter bug BZ#1060287") else: raise e else: raise error.TestFail("Post process volume failed")
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). cleanup.

    :param test: test object, used for PoolVolumeTest and failure reporting
    :param params: dict of test parameters from the cfg file
    :param env: test environment; supplies the VM object by name
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    # NOTE(review): pool_name is re-read here without a default, overriding
    # the "gluster-pool" default above -- confirm this is intentional.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                # Numeric volume attributes must be ints for VolXML.
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            # NOTE(review): the check is on pool_type but the message reports
            # pool_target -- looks like an oversight; confirm before changing.
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                # Attach a second gluster-backed disk under target vdf.
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in libvirt,
                # logical pool could use libvirt to create volume but volume
                # format is not supported and will be 'raw' as default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']
                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        # NOTE(review): reports stdout on failure; the error
                        # text may be on stderr -- confirm.
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                       extra=extra, debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." % (img_path,
                                                         result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                # Snapshot disk XML must not carry device/address elements.
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if disk_xml.source.attrs.has_key('file'):
                        # file-backed source: snapshot target beside original.
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif disk_xml.source.attrs.has_key('name'):
                        # network-backed source (e.g. gluster): keep hosts.
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif (disk_xml.source.attrs.has_key('dev') and
                          disk_xml.type_name == 'block'):
                        # Use local file as external snapshot target for block type.
                        # As block device will be treat as raw format by default,
                        # it's not fit for external disk snapshot target. A work
                        # around solution is use qemu-img again with the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    # Internal snapshot: source element is not needed.
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    # Negative case: creation failure is the expected outcome.
                    return
                else:
                    if re.search("live disk snapshot not supported with this "
                                 "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supportted in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    # Negative case: creation failure is the expected outcome.
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            # Auto-generated snapshot names are numeric timestamps; pick the
            # digits out of the command output.
            snapshot_name = re.search("\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                # Redefine the snapshot from its own dumpxml (with a tweaked
                # creation_time) and mark it current.
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name,
                                                              snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name, options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        # Only the unique name is wanted; the host-side file itself is closed
        # (deleted) immediately and the path reused inside the guest.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, let's only do revert
        # with internal, and move the all skip external cases back to pass.
        # After external also supported, just move the following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        "revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail("Revert command successed, but VM is "
                                         "not "
                                         "paused after reverting with --paused"
                                         " option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            # The file was created AFTER the snapshot, so after a successful
            # revert it must be gone -- 'cat' is expected to fail.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test delete snapshot without "--metadata", delete external disk
        # snapshot will fail for now.
        # Only do this when snapshot creat succeed which filtered in cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        # Expected failure: metadata xml must still be there.
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s missing"
                                                 % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        # Successful delete must remove the metadata xml.
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still"
                                                 % snap_xml_path + " exist")
    finally:
        # Best-effort environment restoration, executed on every exit path.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            # Delete leftover snapshot metadata only (disk data untouched).
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, source_name=vol_name)
            except error.TestFail, detail:
                libvirtd.restart()
                logging.error(str(detail))
def check_vol(expected, avail=True):
    """
    Checks the expected volume details with actual volume details from
    vol-dumpxml
    vol-list
    vol-info
    vol-key
    vol-path
    qemu-img info

    :param expected: dict of the expected volume attributes; keys used here:
                     'pool_name', 'name', 'path', 'type', 'format',
                     'capacity', 'encrypt_format', 'encrypt_secret'
    :param avail: True if the volume is expected to exist, False if it is
                  expected to have been deleted
    :return: number of mismatches found (0 means all checks passed)
    """
    error_count = 0

    pv = libvirt_storage.PoolVolume(expected['pool_name'])
    vol_exists = pv.volume_exists(expected['name'])
    if vol_exists:
        if not avail:
            error_count += 1
            logging.error("Expect volume %s not exists but find it",
                          expected['name'])
            return error_count
    else:
        if avail:
            error_count += 1
            logging.error("Expect volume %s exists but not find it",
                          expected['name'])
            return error_count
        else:
            logging.info("Volume %s checked successfully for deletion",
                         expected['name'])
            return error_count

    actual_list = get_vol_list(expected['pool_name'], expected['name'])
    actual_info = pv.volume_info(expected['name'])
    # Get values from vol-dumpxml
    volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                     expected['pool_name'])

    # Check against virsh vol-key
    vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
    if vol_key.stdout.strip() != volume_xml.key:
        logging.error("Volume key is mismatch \n%s"
                      "Key from xml: %s\nKey from command: %s",
                      expected['name'], volume_xml.key, vol_key)
        error_count += 1
    else:
        logging.debug("virsh vol-key for volume: %s successfully"
                      " checked against vol-dumpxml", expected['name'])

    # Check against virsh vol-name
    get_vol_name = virsh.vol_name(expected['path'])
    if get_vol_name.stdout.strip() != expected['name']:
        logging.error("Volume name mismatch\n"
                      "Expected name: %s\nOutput of vol-name: %s",
                      expected['name'], get_vol_name)
        # FIX: previously this branch only logged the mismatch without
        # counting it, so a vol-name failure never reached the caller.
        error_count += 1
    else:
        # Added for consistency with the sibling checks.
        logging.debug("virsh vol-name for volume: %s successfully"
                      " checked against created volume name",
                      expected['name'])

    # Check against virsh vol-path
    vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
    if expected['path'] != vol_path.stdout.strip():
        logging.error("Volume path mismatch for volume: %s\n"
                      "Expected path: %s\nOutput of vol-path: %s\n",
                      expected['name'], expected['path'], vol_path)
        error_count += 1
    else:
        logging.debug("virsh vol-path for volume: %s successfully checked"
                      " against created volume path", expected['name'])

    # Check path against virsh vol-list
    if expected['path'] != actual_list['path']:
        logging.error("Volume path mismatch for volume:%s\n"
                      "Expected Path: %s\nPath from virsh vol-list: %s",
                      expected['name'], expected['path'],
                      actual_list['path'])
        error_count += 1
    else:
        logging.debug("Path of volume: %s from virsh vol-list "
                      "successfully checked against created "
                      "volume path", expected['name'])

    # Check path against virsh vol-dumpxml
    if expected['path'] != volume_xml.path:
        logging.error("Volume path mismatch for volume: %s\n"
                      "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                      expected['name'], expected['path'], volume_xml.path)
        error_count += 1
    else:
        logging.debug("Path of volume: %s from virsh vol-dumpxml "
                      "successfully checked against created volume path",
                      expected['name'])

    # Check type against virsh vol-list
    if expected['type'] != actual_list['type']:
        logging.error("Volume type mismatch for volume: %s\n"
                      "Expected Type: %s\n Type from vol-list: %s",
                      expected['name'], expected['type'],
                      actual_list['type'])
        error_count += 1
    else:
        logging.debug("Type of volume: %s from virsh vol-list "
                      "successfully checked against the created "
                      "volume type", expected['name'])

    # Check type against virsh vol-info
    if expected['type'] != actual_info['Type']:
        logging.error("Volume type mismatch for volume: %s\n"
                      "Expected Type: %s\n Type from vol-info: %s",
                      expected['name'], expected['type'],
                      actual_info['Type'])
        error_count += 1
    else:
        logging.debug("Type of volume: %s from virsh vol-info successfully"
                      " checked against the created volume type",
                      expected['name'])

    # Check name against virsh vol-info
    if expected['name'] != actual_info['Name']:
        logging.error("Volume name mismatch for volume: %s\n"
                      "Expected name: %s\n Name from vol-info: %s",
                      expected['name'], expected['name'],
                      actual_info['Name'])
        error_count += 1
    else:
        logging.debug("Name of volume: %s from virsh vol-info successfully"
                      " checked against the created volume name",
                      expected['name'])

    # Check format from against qemu-img info
    img_info = utils_misc.get_image_info(expected['path'])
    if expected['format']:
        if expected['format'] != img_info['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n"
                          "Format from qemu-img info: %s",
                          expected['name'], expected['format'],
                          img_info['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from qemu-img info "
                          "checked successfully against the created "
                          "volume format", expected['name'])

    # Check format against vol-dumpxml
    if expected['format']:
        if expected['format'] != volume_xml.format:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n"
                          "Format from vol-dumpxml: %s",
                          expected['name'], expected['format'],
                          volume_xml.format)
            error_count += 1
        else:
            logging.debug("Format of volume: %s from virsh vol-dumpxml "
                          "checked successfully against the created"
                          " volume format", expected['name'])

    logging.info(expected['encrypt_format'])
    # Check encrypt against vol-dumpxml
    if expected['encrypt_format']:
        # As the 'default' format will change to specific valut(qcow), so
        # just output it here
        logging.debug("Encryption format of volume '%s' is: %s",
                      expected['name'], volume_xml.encryption.format)
        # And also output encryption secret uuid
        secret_uuid = volume_xml.encryption.secret['uuid']
        logging.debug("Encryption secret of volume '%s' is: %s",
                      expected['name'], secret_uuid)
        if expected['encrypt_secret']:
            if expected['encrypt_secret'] != secret_uuid:
                logging.error("Encryption secret mismatch for volume: %s\n"
                              "Expected secret uuid: %s\n"
                              "Secret uuid from vol-dumpxml: %s",
                              expected['name'], expected['encrypt_secret'],
                              secret_uuid)
                error_count += 1
        else:
            # If no set encryption secret value, automatically
            # generate a secret value at the time of volume creation
            logging.debug("Volume encryption secret is %s", secret_uuid)

    # Check pool name against vol-pool
    vol_pool = virsh.vol_pool(expected['path'])
    if expected['pool_name'] != vol_pool.stdout.strip():
        logging.error("Pool name mismatch for volume: %s against"
                      "virsh vol-pool", expected['name'])
        error_count += 1
    else:
        logging.debug("Pool name of volume: %s checked successfully"
                      " against the virsh vol-pool", expected['name'])

    # Normalize all capacity figures to a common unit, then compare each
    # source against the expected capacity within delta_size tolerance.
    norm_cap = {}
    capacity = {}
    capacity['list'] = actual_list['capacity']
    capacity['info'] = actual_info['Capacity']
    capacity['xml'] = volume_xml.capacity
    capacity['qemu_img'] = img_info['vsize']
    norm_cap = norm_capacity(capacity)
    delta_size = int(params.get('delta_size', "1024"))
    if abs(expected['capacity'] - norm_cap['list']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-list\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['list'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-list for volume %s", expected['name'])

    if abs(expected['capacity'] - norm_cap['info']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-info\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['info'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-info for volume %s", expected['name'])

    if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against virsh"
                      " vol-dumpxml\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['xml'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " virsh vol-dumpxml for volume: %s",
                      expected['name'])

    if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
        logging.error("Capacity mismatch for volume: %s against "
                      "qemu-img info\nExpected: %s\nActual: %s",
                      expected['name'], expected['capacity'],
                      norm_cap['qemu_img'])
        error_count += 1
    else:
        logging.debug("Capacity value checked successfully against"
                      " qemu-img info for volume: %s", expected['name'])

    return error_count
def check_vol(expected, avail=True):
    """
    Checks the expected volume details with actual volume details from
    vol-dumpxml
    vol-list
    vol-info
    vol-key
    vol-path
    qemu-img info

    :param expected: dict of expected volume properties; keys read here are
                     'name', 'pool_name', 'path', 'type', 'format' and
                     'capacity'
    :param avail: True to verify the volume exists with the expected
                  properties; False to verify it has been deleted
    :return: number of mismatches found (0 means every check passed)
    """
    error_count = 0
    volume_xml = {}
    # vol-list tells us both whether the volume is visible (isavail) and
    # its listed path/type/capacity (actual_list)
    (isavail, actual_list) = get_vol_list(expected['pool_name'],
                                          expected['name'])
    actual_info = get_vol_info(expected['pool_name'], expected['name'])
    if not avail:
        # Deletion check: the volume must no longer show up in vol-list
        if isavail:
            error_count += 1
            logging.error("Deleted vol: %s is still shown in vol-list",
                          expected['name'])
        else:
            logging.info("Volume %s checked successfully for deletion",
                         expected['name'])
        return error_count
    else:
        if not isavail:
            logging.error("Volume list does not show volume %s",
                          expected['name'])
            logging.error("Volume creation failed")
            error_count += 1
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.get_vol_details_by_name(
            expected['name'], expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml['key']:
            logging.error(
                "Volume key is mismatch \n%s"
                "Key from xml: %s\n Key from command: %s",
                expected['name'], volume_xml['key'], vol_key)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-key for volume: %s successfully"
                " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected path: %s\n Output of vol-path: %s\n",
                expected['name'], expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-path for volume: %s successfully checked"
                " against created volume path", expected['name'])

        # Check path against virsh vol-list (only meaningful when listed)
        if isavail:
            if expected['path'] != actual_list['path']:
                logging.error(
                    "Volume path mismatch for volume:%s\n"
                    "Expected Path: %s\n Path from virsh vol-list: %s",
                    expected['name'], expected['path'],
                    actual_list['path'])
                error_count += 1
            else:
                logging.debug(
                    "Path of volume: %s from virsh vol-list "
                    "successfully checked against created "
                    "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml['path']:
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected Path: %s\n Path from virsh vol-dumpxml: %s",
                expected['name'], expected['path'], volume_xml['path'])
            error_count += 1
        else:
            logging.debug(
                "Path of volume: %s from virsh vol-dumpxml "
                "successfully checked against created volume path",
                expected['name'])

        # Check type against virsh vol-list (only meaningful when listed)
        if isavail:
            if expected['type'] != actual_list['type']:
                logging.error(
                    "Volume type mismatch for volume: %s\n"
                    "Expected Type: %s\n Type from vol-list: %s",
                    expected['name'], expected['type'],
                    actual_list['type'])
                error_count += 1
            else:
                logging.debug(
                    "Type of volume: %s from virsh vol-list "
                    "successfully checked against the created "
                    "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-info: %s",
                expected['name'], expected['type'], actual_info['type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-info successfully"
                " checked against the created volume type",
                expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['name']:
            logging.error(
                "Volume name mismatch for volume: %s\n"
                "Expected name: %s\n Name from vol-info: %s",
                expected['name'], expected['name'], actual_info['name'])
            error_count += 1
        else:
            logging.debug(
                "Name of volume: %s from virsh vol-info successfully"
                " checked against the created volume name",
                expected['name'])

        # Check format from against qemu-img info
        img_info = get_image_info(expected['path'])
        if expected['format'] != img_info['format']:
            logging.error(
                "Volume format mismatch for volume: %s\n"
                "Expected format: %s\n Format from qemu-img info: %s",
                expected['name'], expected['format'], img_info['format'])
            error_count += 1
        else:
            logging.debug(
                "Format of volume: %s from qemu-img info checked "
                "successfully against the created volume format",
                expected['name'])

        # Check format against vol-dumpxml
        if expected['format'] != volume_xml['format']:
            logging.error(
                "Volume format mismatch for volume: %s\n"
                "Expected format: %s\n Format from vol-dumpxml: %s",
                expected['name'], expected['format'], volume_xml['format'])
            error_count += 1
        else:
            logging.debug(
                "Format of volume: %s from virsh vol-dumpxml checked"
                " successfully against the created volume format",
                expected['name'])

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error(
                "Pool name mismatch for volume: %s against"
                "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug(
                "Pool name of volume: %s checked successfully"
                " against the virsh vol-pool", expected['name'])

        # Normalize the capacity reported by the four sources to a common
        # unit before comparing each against the expected capacity.
        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['capacity']
        capacity['xml'] = volume_xml['capacity']
        capacity['qemu_img'] = img_info['capacity']
        norm_cap = norm_capacity(capacity)
        if expected['capacity'] != norm_cap['list']:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-list\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['list'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-list for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['info']:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['info'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-info for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['xml']:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-dumpxml\nExpected: %s\nActual: %s",
                expected['name'], expected['capacity'], norm_cap['xml'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-dumpxml for volume: %s", expected['name'])

        if expected['capacity'] != norm_cap['qemu_img']:
            logging.error(
                "Capacity mismatch for volume: %s against "
                "qemu-img info\nExpected: %s\nActual: %s",
                expected['name'], expected['capacity'],
                norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " qemu-img info for volume: %s", expected['name'])
    return error_count
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation

    :param test: test object, used to report failure / skip
    :param params: dict of test parameters from the cfg file
    :param env: test environment object (required by the harness API)
    """
    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    # BUGFIX: was 'is ""' -- identity comparison against a string literal
    # is implementation dependent; equality is the intended check.
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(test.tmpdir, file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol",
                                      "yes"))

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        if create_vol:
            pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)
        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]
        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options. Initialize to "" so the variable is always
        # bound even if pool_type is missing from the cfg.
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write date to file
            write_file(file_path)

            # Set length for calculate the offset + length in the following
            # func get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digest if have offset and length

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)
                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)
            if params.get('setup_libvirt_polkit') == 'yes':
                utils.run("chmod 666 %s" % file_path)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                        ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    # BUGFIX: message previously passed with a trailing
                    # ", operation" argument, which TestFail does not
                    # interpolate; use explicit %-formatting.
                    raise error.TestFail("file pre or post digests do not "
                                         "match, in %s" % operation)

        if operation == "download":
            # Write date to volume
            if pool_type == "disk":
                utils.run("mkfs.ext3 -F %s" % vol_path)
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)
            if params.get('setup_libvirt_polkit') == 'yes':
                utils.run("chmod 666 %s" % file_path)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            raise error.TestFail("Fail to %s volume: %s" % (operation,
                                                            result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            # BUGFIX: same non-interpolated message pattern as above;
            # apply the "%" operator so the operation name is reported.
            raise error.TestFail("file digests do not match, volume %s "
                                 "failed" % operation)
    finally:
        utlv.PoolVolumeTest(test, params).cleanup_pool(pool_name, pool_type,
                                                       pool_target,
                                                       "volumetest")
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create a image by specifice format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). cleanup.

    :param test: test object, used to report failure / skip
    :param params: dict of test parameters from the cfg file
    :param env: test environment object providing the VM under test
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")

    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            # These four volume attributes must be integers in the XML.
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            # NOTE(review): the message interpolates pool_target where
            # pool_type looks intended -- confirm before changing.
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    tmp_dir = data_dir.get_tmp_dir()
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         image_size="1G", pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in libvirt,
                # logical pool could use libvirt to create volume but volume
                # format is not supported and will be 'raw' as default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    # Use the first pre-existing volume of the pool.
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s", pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())

            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        # Do the attach action.
        out = utils.run("qemu-img info %s" % img_path)
        logging.debug("The img info is:\n%s" % out.stdout.strip())
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestNAError("Failed to attach disk %s to VM."
                                    "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            # Build a domainsnapshot XML by hand, line by line.
            snapshot_name = "snapshot_test"
            lines = [
                "<domainsnapshot>\n",
                "<name>%s</name>\n" % snapshot_name,
                "<description>Snapshot Test</description>\n"
            ]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    snap_path = "%s.snap" % os.path.basename(disk['source'])
                    disk_external = os.path.join(tmp_dir, snap_path)
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            logging.debug("The xml content for snapshot create is:")
            with open(snapshot_xml_path, 'r') as fin:
                logging.debug(fin.read())
            options += " --xmlfile %s " % snapshot_xml_path
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    # Expected failure in a negative case.
                    return
                else:
                    # Known-unsupported configurations are turned into SKIPs.
                    if re.search(
                            "live disk snapshot not supported with this QEMU binary",
                            out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    # Expected failure in a negative case.
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            # Auto-generated snapshot names are numeric; pull the first
            # run of digits out of the command output.
            snapshot_name = re.search("\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                # Redefine the snapshot as current from a hand-built XML.
                lines = [
                    "<domainsnapshot>\n",
                    "<description>Snapshot Test</description>\n",
                    "<state>running</state>\n",
                    "<creationTime>%s</creationTime>" % snapshot_name,
                    "</domainsnapshot>"
                ]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                logging.debug("The xml content for snapshot create is:")
                with open(snapshot_xml_path, 'r') as fin:
                    logging.debug(fin.read())
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # Destroy vm for snapshot revert.
        if not libvirt_version.version_compare(1, 2, 3):
            virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options,
                                              debug=True)
        if revert_result.exit_status:
            # As commit d410e6f for libvirt 1.2.3, attempts to revert external
            # snapshots will FAIL with an error "revert to external snapshot
            # not supported yet". Thus, let's check for that and handle as a
            # SKIP for now. Check bug:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
            if libvirt_version.version_compare(1, 2, 3):
                if re.search("revert to external snapshot not supported yet",
                             revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
            else:
                raise error.TestFail("Revert snapshot failed. %s" %
                                     revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused"
                                     " option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        # The file written after the snapshot must be gone; "cat" failing
        # (non-zero status) is the expected, successful outcome.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("After revert cat file output='%s'", output)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

        # Test delete snapshot without "--metadata", delete external disk
        # snapshot will fail for now.
        # Only do this when snapshot creat succeed which filtered in cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        # Expected failure: the metadata file must survive.
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" %
                                snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still" %
                                                 snap_xml_path + " exist")
    finally:
        # Best-effort environment restoration regardless of test outcome.
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail, detail:
                logging.error(str(detail))
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation

    :param test: avocado test object (fail/cancel/error reporting)
    :param params: dict of test parameters from the cfg file
    :param env: test environment object (required by the harness API)
    """
    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    # BUGFIX: was 'is ""' -- identity comparison against a string literal
    # is implementation dependent; equality is the intended check.
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol",
                                      "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")

    # Bind pvt before the try block so the finally clause never hits an
    # unbound name if pool setup raises before the assignment completes.
    pvt = None
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need inpect the right name
        # (disk partition) for new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options. Initialize to "" so the variable is always
        # bound even if pool_type is missing from the cfg.
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write date to file
            write_file(file_path)

            # Set length for calculate the offset + length in the following
            # func get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digest if have offset and length

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)
                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                        ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    # BUGFIX: message previously passed with a trailing
                    # ", operation" argument, which test.fail() does not
                    # interpolate; use explicit %-formatting.
                    test.fail("file pre or post digests do not "
                              "match, in %s" % operation)

        if operation == "download":
            # Write date to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)
            process.run("touch %s" % file_path, ignore_status=True,
                        shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            test.fail("Fail to %s volume: %s" % (operation, result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            test.fail("file digests do not match, volume %s failed"
                      % operation)
    finally:
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.

    :param test: avocado-vt test object, used for fail/cancel/error reporting
    :param params: dict-like object with the cartesian test parameters
    :param env: test environment object holding the VM under test
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables: uid, gid and mode of the attached image.
    # All three must be numeric, otherwise the test is cancelled.
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            test.cancel("%s value '%s' is not a number." % (i, params.get(i)))
    # False positive - img_val was filled in the for loop above.
    # pylint: disable=E0632
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vm_os_xml = vmxml.os

    # Backup domain disk ownership labels so they can be restored on cleanup.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        label = check_ownership(disk_path)
        if label:
            backup_labels_of_disks[disk_path] = label

    try:
        if vm_os_xml.nvram:
            nvram_path = vm_os_xml.nvram
            if not os.path.exists(nvram_path):
                # Need libvirt automatically generate the path
                vm.start()
                vm.destroy(gracefully=False)
            label = check_ownership(nvram_path)
            if label:
                backup_labels_of_disks[nvram_path] = label
    except xcepts.LibvirtXMLNotFoundError:
        logging.debug("vm xml don't have nvram element")

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk to qemu:qemu to avoid fail on local disk
        for file_path in list(backup_labels_of_disks.keys()):
            if qemu_user == "root":
                os.chown(file_path, 0, 0)
            elif qemu_user == "qemu":
                # 107:107 is the conventional qemu uid/gid on RHEL-family
                # hosts — presumably matches the installed qemu user; the
                # generic branch below handles any other user name.
                os.chown(file_path, 107, 107)
            else:
                process.run('chown %s %s' % (qemu_user, file_path),
                            shell=True)

        # Set selinux of host.
        if backup_sestatus == "disabled":
            # FIX: the adjacent string literals were missing separating
            # spaces, producing "mode.It must be Enabled torun this test".
            test.cancel("SELinux is in Disabled mode. "
                        "It must be Enabled to "
                        "run this test")
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        # FIX: group was previously assigned from qemu_user, leaving the
        # 'qemu_group' parameter fetched above entirely unused.  Use
        # qemu_group when provided, falling back to qemu_user to keep the
        # old behavior for configs that do not set it.
        qemu_conf.group = qemu_group or qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s", qemu_conf)
        # qemu.conf changes only take effect after a daemon restart.
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s", export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                             shell=True)
        if result.exit_status:
            test.cancel("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create a image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            # Re-create the image on top of a backing file when requested.
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            test.cancel("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path,
                                   target="vdf", extra=extra, debug=True)
        if result.exit_status:
            # FIX: missing space between the concatenated message parts.
            test.fail("Failed to attach disk %s to VM. "
                      "Detail: %s."
                      % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug(
                "attached image ownership on nfs server before "
                "start: %s", img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug(
                    "attached image ownership on nfs server after"
                    " start: %s", img_label_after)

            if status_error:
                test.fail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                # FIX: missing space between the concatenated message parts.
                test.fail("Test failed in positive case. "
                          "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            else:
                # FIX: use a raw string for the regex and only parse the
                # snapshot name on success — previously the stdout of a
                # *failed* (expected-negative) snapshot was parsed too,
                # which could raise AttributeError on a non-match.
                snapshot_name = re.search(
                    r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in list(disks_snap.values()):
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except process.CmdError:
            test.fail("Detach disk 'vdf' from VM %s failed." % vm.name)
    finally:
        # clean up: restore VM state, qemu.conf, disk ownership, domain XML,
        # snapshot files, storage pool and SELinux mode.
        vm.destroy()
        qemu_conf.restore()
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        if snapshot_name:
            # Keep snapshot metadata consistent when syncing the backup XML.
            backup_xml.sync("--snapshots-metadata")
        else:
            backup_xml.sync()
        for i in disk_snap_path:
            if i and os.path.exists(i):
                os.unlink(i)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        utils_selinux.set_status(backup_sestatus)
        libvirtd.restart()
def run(test, params, env):
    """
    Test virsh vol-create and vol-create-as command to cover the following
    matrix:

    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]

    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]

    pool_type = [logical]
    volume_format = [none]

    pool_type = [iscsi, scsi]
    Not supported with format type

    TODO:
    pool_type = [rbd, glusterfs]

    Reference: http://www.libvirt.org/storage.html

    :param test: avocado-vt test object, used for fail/cancel/error reporting
    :param params: dict-like object with the cartesian test parameters
    :param env: test environment object
    """
    # Pull source-pool and volume parameters from the cartesian config.
    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_pool_format = params.get("src_pool_format", "")
    pool_vol_num = int(params.get("src_pool_vol_num", '1'))
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    prefix_vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format", "raw")
    vol_capacity = params.get("vol_capacity", 1048576)
    vol_allocation = params.get("vol_allocation", 1048576)
    image_size = params.get("emulate_image_size", "1G")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    by_xml = "yes" == params.get("create_vol_by_xml", "yes")
    incomplete_target = "yes" == params.get("incomplete_target", "no")
    luks_encrypted = "luks" == params.get("encryption_method")
    encryption_secret_type = params.get("encryption_secret_type", "passphrase")
    virsh_readonly_mode = 'yes' == params.get("virsh_readonly", "no")

    # Cancel early for features not available in old libvirt versions.
    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")
        if incomplete_target:
            test.cancel("It does not support generate target path"
                        "in current libvirt version.")

    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        test.cancel("pool type %s not in supported type list: %s" %
                    (src_pool_type, pool_type))

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        # Placeholder user names from example configs are masked.
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Stop multipathd to avoid start pool fail(For fs like pool, the new add
    # disk may in use by device-mapper, so start pool will report disk already
    # mounted error).  It is restarted in the finally block if it was running.
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                # These VolXML attributes must be integers.
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    def create_luks_secret(vol_path):
        """
        Create a libvirt secret for luks encryption.

        :param vol_path: volume path the secret is bound to.
        :return: uuid of the newly defined secret.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        utlv.check_exit_status(ret)
        # Get secret uuid.
        # NOTE(review): this regex scrapes the uuid token out of the
        # secret-define output (presumably "Secret <uuid> created ...") —
        # fragile against output-format changes; verify on new libvirt.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as detail:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value: the passphrase must be base64-encoded for
        # virsh secret-set-value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            'redhat'.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string)
        utlv.check_exit_status(ret)
        return encryption_uuid

    def post_process_vol(ori_vol_path):
        """
        Create or deactivate a volume without libvirt (via lvcreate,
        qemu-img or lvchange), to test pool-refresh behavior afterwards.

        :param ori_vol_path: Full path of an original volume
        :return: Volume name for checking, or None on unsupported
                 combination / command failure.
        """
        process_vol_name = params.get("process_vol_name", "process_vol")
        process_vol_options = params.get("process_vol_options", "")
        process_vol_capacity = params.get("process_vol_capacity", vol_capacity)
        process_vol_cmd = ""
        unsupport_err = "Unsupport do '%s %s' in this test" % (
            process_vol_by, process_vol_type)
        if process_vol_by == "lvcreate":
            process_vol_cmd = "lvcreate -L %s " % process_vol_capacity
            if process_vol_type == "thin":
                if not process_vol_options:
                    process_vol_options = "-T "
                process_vol_cmd += "%s " % process_vol_options
                processthin_pool_name = params.get("processthin_pool_name",
                                                   "thinpool")
                processthin_vol_name = params.get("processthin_vol_name",
                                                  "thinvol")
                process_vol_capacity = params.get("process_vol_capacity",
                                                  "1G")
                # NOTE(review): result of this call is discarded — the
                # statement has no effect (the dirname is recomputed on the
                # next line).
                os.path.dirname(ori_vol_path)
                process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path),
                                               processthin_pool_name)
                process_vol_cmd += "-V %s " % process_vol_capacity
                process_vol_cmd += "-n %s " % processthin_vol_name
                process_vol_name = processthin_vol_name
            elif process_vol_type == "snapshot":
                if not process_vol_options:
                    process_vol_options = "-s "
                process_vol_cmd += "%s " % process_vol_options
                process_vol_cmd += "-n %s " % process_vol_name
                process_vol_cmd += "%s " % (ori_vol_path)
            else:
                logging.error(unsupport_err)
                return
        elif process_vol_by == "qemu-img" and process_vol_type == "create":
            process_vol_cmd = "qemu-img create "
            process_vol_path = os.path.dirname(ori_vol_path) + "/"
            process_vol_path += process_vol_name
            process_vol_cmd += "%s " % process_vol_options
            process_vol_cmd += "%s " % process_vol_path
            process_vol_cmd += "%s " % process_vol_capacity
        elif process_vol_by == "lvchange" and process_vol_type == "deactivate":
            process_vol_cmd = "lvchange %s " % ori_vol_path
            if not process_vol_options:
                process_vol_options = "-an"
            process_vol_cmd += process_vol_options
        else:
            logging.error(unsupport_err)
            return
        rst = process.run(process_vol_cmd, ignore_status=True, shell=True)
        if rst.exit_status:
            if "Snapshots of snapshots are not supported" in rst.stderr_text:
                # The volume is already a snapshot; treat it as the result.
                logging.debug("%s is already a snapshot volume", ori_vol_path)
                process_vol_name = os.path.basename(ori_vol_path)
            else:
                logging.error(rst.stderr_text)
                return
        return process_vol_name

    def check_vol(pool_name, vol_name, expect_exist=True):
        """
        Check whether volume vol_name exists (or not) in pool pool_name,
        and that its format matches vol_format when it should exist.
        """
        src_volumes = src_pv.list_volumes().keys()
        logging.debug("Current volumes in %s: %s", pool_name, src_volumes)
        if expect_exist:
            if vol_name not in src_volumes:
                test.fail("Can't find volume %s in pool %s"
                          % (vol_name, pool_name))
            # check format in volume xml
            volxml = libvirt_xml.VolXML()
            post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name)
            logging.debug("Volume %s XML: %s" % (vol_name,
                                                 post_xml.xmltreefile))
            if 'format' in post_xml.keys() and vol_format is not None:
                if post_xml.format != vol_format:
                    test.fail("Volume format %s is not expected"
                              % vol_format + " as defined.")
        else:
            if vol_name in src_volumes:
                test.fail("Find volume %s in pool %s, but expect not"
                          % (vol_name, pool_name))

    # qemu-img error strings that mean "format unsupported" — these turn a
    # failure into a test cancel instead of a test fail.
    fmt_err0 = "Unknown file format '%s'" % vol_format
    fmt_err1 = "Formatting or formatting option not "
    fmt_err1 += "supported for file format '%s'" % vol_format
    fmt_err2 = "Driver '%s' does not support " % vol_format
    fmt_err2 += "image creation"
    fmt_err_list = [fmt_err0, fmt_err1, fmt_err2]
    skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format

    vol_path_list = []
    secret_uuids = []
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target,
                     src_emulated_image, image_size=image_size,
                     source_format=src_pool_format)

        src_pv = libvirt_storage.PoolVolume(src_pool_name)
        src_pool_uuid = libvirt_storage.StoragePool().pool_info(
            src_pool_name)['UUID']
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())

        # Create volumes by virsh in a loop
        while pool_vol_num > 0:
            # Set volume xml file
            vol_name = prefix_vol_name + "_%s" % pool_vol_num
            bad_vol_name = params.get("bad_vol_name", "")
            if bad_vol_name:
                # Negative case: use an intentionally invalid volume name.
                vol_name = bad_vol_name
            pool_vol_num -= 1
            # disk partition for new volume
            if src_pool_type == "disk":
                vol_name = utlv.new_disk_vol_name(src_pool_name)
                if vol_name is None:
                    test.error("Fail to generate volume name")
            if by_xml:
                # According to BZ#1138523, we need inspect the right name
                # (disk partition) for new volume
                if src_pool_type == "disk":
                    vol_name = utlv.new_disk_vol_name(src_pool_name)
                    if vol_name is None:
                        test.error("Fail to generate volume name")
                vol_arg['name'] = vol_name
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                if luks_encrypted:
                    # For luks encrypted disk, add related xml in newvol
                    luks_encryption_params = {}
                    luks_encryption_params.update({"format": "luks"})
                    luks_secret_uuid = create_luks_secret(
                        os.path.join(src_pool_target, vol_name))
                    secret_uuids.append(luks_secret_uuid)
                    luks_encryption_params.update({"secret": {
                        "type": encryption_secret_type,
                        "uuid": luks_secret_uuid}})
                    newvol.encryption = volxml.new_encryption(
                        **luks_encryption_params)
                vol_xml = newvol['xml']
                if params.get('setup_libvirt_polkit') == 'yes':
                    # Make the generated XML readable by the unprivileged
                    # user used for the ACL test.
                    process.run("chmod 666 %s" % vol_xml, ignore_status=True,
                                shell=True)
                    if luks_encrypted and libvirt_version.version_compare(
                            4, 5, 0):
                        # Rewrite the generated polkit rule so the secret
                        # object (by uuid) is covered in addition to the
                        # pool object.
                        try:
                            polkit = test_setup.LibvirtPolkitConfig(params)
                            polkit_rules_path = polkit.polkit_rules_path
                            with open(polkit_rules_path, 'r+') as f:
                                rule = f.readlines()
                                for index, v in enumerate(rule):
                                    if v.find("secret") >= 0:
                                        nextline = rule[index + 1]
                                        s = nextline.replace(
                                            "QEMU", "secret").replace(
                                            "pool_name", "secret_uuid").replace(
                                            "virt-dir-pool",
                                            "%s" % luks_secret_uuid)
                                        rule[index + 1] = s
                                rule = ''.join(rule)
                            with open(polkit_rules_path, 'w+') as f:
                                f.write(rule)
                            logging.debug(rule)
                            polkit.polkitd.restart()
                        except IOError as e:
                            logging.error(e)
                # Run virsh_vol_create to create vol
                logging.debug("Create volume from XML: %s"
                              % newvol.xmltreefile)
                cmd_result = virsh.vol_create(
                    src_pool_name, vol_xml, extra_option,
                    unprivileged_user=unprivileged_user, uri=uri,
                    ignore_status=True, debug=True)
            else:
                # Run virsh_vol_create_as to create_vol
                pool_name = src_pool_name
                if params.get("create_vol_by_pool_uuid") == "yes":
                    # Address the pool by uuid instead of name.
                    pool_name = src_pool_uuid
                cmd_result = virsh.vol_create_as(
                    vol_name, pool_name, vol_capacity, vol_allocation,
                    vol_format, extra_option,
                    unprivileged_user=unprivileged_user, uri=uri,
                    readonly=virsh_readonly_mode,
                    ignore_status=True, debug=True)
            # Check result
            try:
                utlv.check_exit_status(cmd_result, status_error)
                check_vol(src_pool_name, vol_name, not status_error)
                if bad_vol_name:
                    pattern = "volume name '%s' cannot contain '/'" % vol_name
                    logging.debug("pattern: %s", pattern)
                    if "\\" in pattern and by_xml:
                        # Escape backslashes so the name is matched
                        # literally by re.search below.
                        pattern = pattern.replace("\\", "\\\\")
                    if re.search(pattern, cmd_result.stderr) is None:
                        test.fail("vol-create failed with unexpected reason")
                if not status_error:
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    logging.debug("Full path of %s: %s", vol_name, vol_path)
                    vol_path_list.append(vol_path)
            except exceptions.TestFail as detail:
                stderr = cmd_result.stderr
                # An unsupported volume format is a cancel, not a failure.
                if any(err in stderr for err in fmt_err_list):
                    test.cancel(skip_msg)
                else:
                    test.fail("Create volume fail:\n%s" % detail)
        # Post process vol by other programs
        process_vol_by = params.get("process_vol_by")
        process_vol_type = params.get("process_vol_type", "")
        expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes")
        if process_vol_by and vol_path_list:
            process_vol = post_process_vol(vol_path_list[0])
            if process_vol is not None:
                try:
                    virsh.pool_refresh(src_pool_name, ignore_status=False)
                    check_vol(src_pool_name, process_vol, expect_vol_exist)
                except (process.CmdError, exceptions.TestFail) as detail:
                    if process_vol_type == "thin":
                        logging.error(str(detail))
                        test.cancel("You may encounter bug BZ#1060287")
                    else:
                        test.fail("Fail to refresh pool:\n%s" % detail)
            else:
                test.fail("Post process volume failed")
    finally:
        # Cleanup
        # For old version lvm2(2.02.106 or early), deactivate volume group
        # (destroy libvirt logical pool) will fail if which has deactivated
        # lv snapshot, so before destroy the pool, we need activate it manually
        if src_pool_type == 'logical' and vol_path_list:
            vg_name = vol_path_list[0].split('/')[2]
            process.run("lvchange -ay %s" % vg_name, shell=True)
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type,
                             src_pool_target, src_emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        # Restore multipathd if we stopped it at the start.
        if multipathd_status:
            multipathd.start()
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash,fsid=0") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: raise error.TestNAError("%s value '%s' is not a number." % (i, params.get(i))) img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in disks.values(): disk_path = disk['source'] f = os.open(disk_path, 0) stat_re = os.fstat(f) backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) os.close(f) # Backup selinux status of host. 
backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for disk in disks.values(): disk_path = disk['source'] if qemu_user == "root": os.chown(disk_path, 0, 0) elif qemu_user == "qemu": os.chown(disk_path, 107, 107) # Set selinux of host. utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s" % export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs) if result.exit_status: raise error.TestNAError("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. 
extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestFail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug("attached image ownership on nfs server before " "start: %s" % img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug("attached image ownership on nfs server after" " start: %s" % img_label_after) if status_error: raise error.TestFail('Test succeeded in negative case.') except virt_vm.VMStartError, e: # Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in disks_snap.values(): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except error.CmdError: raise error.TestFail("Detach disk 'vdf' from VM %s failed." % vm.name)