def test_basic_workflow(self):
    """
    Check the basic LVM workflow works using a ramdisk-backed VG.

    Creates a sparse-file-backed volume group, creates/mounts/removes a
    logical volume inside it, then cleans the ramdisk VG up again.
    On any failure a best-effort cleanup is performed and the original
    error is re-raised so the test cannot silently pass.
    """
    ramdisk_filename = vg_ramdisk_dir = loop_device = None
    vg_name = "avocado_testing_vg_e5kj3erv11a"
    lv_name = "avocado_testing_lv_lk0ff33al5h"
    ramdisk_basedir = os.path.join(self.tmpdir, "foo", "bar")
    mount_loc = os.path.join(self.tmpdir, "lv_mount_location")
    os.mkdir(mount_loc)
    try:
        # Create ramdisk vg
        self.assertFalse(os.path.exists(ramdisk_basedir))
        self.assertFalse(lv_utils.vg_check(vg_name))
        spec = lv_utils.vg_ramdisk(False, vg_name, 10, ramdisk_basedir,
                                   "sparse_file")
        ramdisk_filename, vg_ramdisk_dir, vg_name, loop_device = spec
        # Check it was created properly
        self.assertTrue(ramdisk_filename)
        self.assertTrue(vg_ramdisk_dir)
        self.assertTrue(vg_name)
        self.assertTrue(loop_device)
        self.assertTrue(os.path.exists(ramdisk_basedir))
        self.assertTrue(glob.glob(os.path.join(ramdisk_basedir, "*")))
        self.assertTrue(lv_utils.vg_check(vg_name))
        vgs = lv_utils.vg_list()
        self.assertIn(vg_name, vgs)
        # Can't create existing vg
        self.assertRaises(lv_utils.LVException, lv_utils.vg_create,
                          vg_name, loop_device)
        # Create and check LV
        lv_utils.lv_create(vg_name, lv_name, 1)
        lv_utils.lv_check(vg_name, lv_name)
        self.assertIn(vg_name,
                      process.run("lvs --all", sudo=True).stdout_text)
        self.assertIn(lv_name, lv_utils.lv_list())
        lv_utils.lv_mount(vg_name, lv_name, mount_loc, "ext2")
        lv_utils.lv_umount(vg_name, lv_name)
        lv_utils.lv_remove(vg_name, lv_name)
        self.assertNotIn(lv_name, lv_utils.lv_list())
        # Cleanup ramdisk vgs
        lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                    vg_name, loop_device)
        self.assertTrue(os.path.exists(ramdisk_basedir))
        self.assertFalse(glob.glob(os.path.join(ramdisk_basedir, "*")))
    except BaseException as details:
        # Best-effort cleanup; each step uses its own exception name so the
        # original failure in `details` is not shadowed, and it is re-raised
        # at the end (previously the failure was swallowed after printing).
        try:
            process.run("mountpoint %s && umount %s" % (mount_loc, mount_loc),
                        shell=True, sudo=True)
        except BaseException as umount_error:
            print("Fail to unmount LV: %s" % umount_error)
        try:
            lv_utils.lv_remove(vg_name, lv_name)
        except BaseException as lv_error:
            print("Fail to cleanup LV: %s" % lv_error)
        try:
            lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                        vg_name, loop_device)
        except BaseException as vg_error:
            print("Fail to cleanup vg_ramdisk: %s" % vg_error)
        raise details
def is_lv_deleted():
    """
    Remove the test LV and its VG, then report whether the LV is gone.

    :returns: whether the logical volume no longer exists
    :rtype: bool
    """
    lv_utils.lv_remove(self.vgname, self.lvname)
    # give the device mapper some time to settle before removing the VG
    time.sleep(5)
    lv_utils.vg_remove(self.vgname)
    return not lv_utils.lv_check(self.vgname, self.lvname)
def get(cls, params, object=None):
    """
    Retrieve a state disregarding the current changes.

    All arguments match the base class.
    """
    vm_id = params["vms"]
    vg_name = params["vg_name"]
    pointer_lv = params["lv_pointer_name"]
    mount_point = cls._get_image_mount_loc(params)
    params["lv_snapshot_name"] = params["get_state"]
    if mount_point:
        # remount first so the unmount below cannot fail with a
        # "not mounted" error
        try:
            lv_utils.lv_mount(vg_name, pointer_lv, mount_point)
        except lv_utils.LVException:
            pass
        lv_utils.lv_umount(vg_name, pointer_lv)
    try:
        logging.info("Restoring %s to state %s", vm_id, params["get_state"])
        # drop the pointer volume and recreate it from the snapshot
        lv_utils.lv_remove(vg_name, pointer_lv)
        lv_utils.lv_take_snapshot(vg_name,
                                  params["lv_snapshot_name"],
                                  pointer_lv)
    finally:
        if mount_point:
            lv_utils.lv_mount(vg_name, pointer_lv, mount_point)
def delete_lv(self):
    """Clear all PV, VG, LV and snapshots created by the test."""
    vg, lv = self.vg_name, self.lv_name
    # remove the logical volume first, then the enclosing volume group
    if lv_utils.lv_check(vg, lv):
        lv_utils.lv_remove(vg, lv)
    if lv_utils.vg_check(vg):
        lv_utils.vg_remove(vg)
def unset(cls, params, object=None):
    """
    Remove a state with previous changes.

    All arguments match the base class and in addition:

    :raises: :py:class:`ValueError` if LV pointer state was used
    """
    vm_id = params["vms"]
    state = params["unset_state"]
    lv_pointer = params["lv_pointer_name"]
    # the pointer LV is the live volume, not a removable snapshot
    if state == lv_pointer:
        raise ValueError("Cannot unset built-in state '%s'" % lv_pointer)
    params["lv_snapshot_name"] = state
    logging.info("Removing snapshot %s of %s", state, vm_id)
    lv_utils.lv_remove(params["vg_name"], state)
def _get_state(vm, vm_params):
    """
    Get to an online/offline state of a vm object.

    We use LVM for offline snapshots and QCOW2 for online snapshots.
    """
    vm_name = vm_params["vms"]
    state = vm_params["get_state"]
    if state in OFFLINE_ROOTS + ONLINE_ROOTS:
        # reusing root states (offline root and online boot) is analogical to not doing anything
        return

    if vm_params["get_type"] == "offline":
        vm_params["lv_snapshot_name"] = state
        raw_device = vm_params.get("image_raw_device", "yes")
        if raw_device == "no":
            # unmount the pointer volume before replacing it
            try:
                lv_utils.lv_umount(vm_params["vg_name"],
                                   vm_params["lv_pointer_name"])
            except lv_utils.LVException:
                pass
        try:
            logging.info("Restoring %s to state %s", vm_name, state)
            lv_utils.lv_remove(vm_params["vg_name"],
                               vm_params["lv_pointer_name"])
            lv_utils.lv_take_snapshot(vm_params["vg_name"],
                                      vm_params["lv_snapshot_name"],
                                      vm_params["lv_pointer_name"])
        finally:
            if raw_device == "no":
                mount_loc = os.path.dirname(vm_params["image_name"])
                lv_utils.lv_mount(vm_params["vg_name"],
                                  vm_params["lv_pointer_name"],
                                  mount_loc)
    else:
        logging.info("Reusing online state '%s' of %s", state, vm_name)
        vm.pause()
        # NOTE: second online type is available and still supported but not recommended
        if vm_params["get_type"] != "ramfile":
            vm.loadvm(state)
        else:
            state_dir = os.path.dirname(vm_params.get("image_name", ""))
            state_file = "%s.state" % os.path.join(state_dir, state)
            vm.restore_from_file(state_file)
        vm.resume()
def _unset_state(vm, vm_params):
    """
    Unset an online/offline state of a vm object.

    We use LVM for offline snapshots and QCOW2 for online snapshots.

    :raises: :py:class:`ValueError` if the built-in LV pointer state is unset
    """
    vm_name = vm_params["vms"]
    if vm_params["unset_state"] in OFFLINE_ROOTS:
        # offline switch to protect from online leftover state
        if vm is not None and vm.is_alive():
            vm.destroy(gracefully=False)
        # vm_params["vms"] = vm_name
        vm_params["main_vm"] = vm_name
        unset_root(vm_params)
    elif vm_params["unset_type"] == "offline":
        lv_pointer = vm_params["lv_pointer_name"]
        if vm_params["unset_state"] == lv_pointer:
            raise ValueError("Cannot unset built-in offline state '%s'" % lv_pointer)
        vm_params["lv_snapshot_name"] = vm_params["unset_state"]
        logging.info("Removing snapshot %s of %s",
                     vm_params["lv_snapshot_name"], vm_name)
        lv_utils.lv_remove(vm_params["vg_name"], vm_params["lv_snapshot_name"])
    elif vm_params["unset_state"] in ONLINE_ROOTS:
        if vm is not None and vm.is_alive():
            vm.destroy(gracefully=False)
    else:
        logging.info("Removing online state '%s' of %s",
                     vm_params["unset_state"], vm_name)
        vm.pause()
        # NOTE: second online type is available and still supported but not recommended
        if vm_params["unset_type"] != "ramfile":
            # NOTE: this was supposed to be implemented in the Qemu VM object but
            # it is not unlike savevm and loadvm, perhaps due to command availability
            vm.verify_status('paused')
            logging.debug("Deleting VM %s from %s",
                          vm_name, vm_params["unset_state"])
            vm.monitor.send_args_cmd("delvm id=%s" % vm_params["unset_state"])
            vm.verify_status('paused')
        else:
            state_dir = vm_params.get("image_name", "")
            state_dir = os.path.dirname(state_dir)
            # FIX: delete the file of the state being unset ("unset_state");
            # the original read "set_state" here, inconsistent with the log
            # message above and with _get_state's use of "get_state"
            state_file = os.path.join(state_dir, vm_params["unset_state"])
            state_file = "%s.state" % state_file
            os.unlink(state_file)
        vm.resume()
def delete_lv(self):
    """Remove the test logical volume and then its volume group."""
    vg, lv = 'avocado_vg', 'avocado_lv'
    lv_utils.lv_remove(vg, lv)
    lv_utils.vg_remove(vg)
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported after special libvirt version.)
    These are test cases for it:

    :param test: test object used for error/fail reporting
    :param params: test parameters (disk_type, discard_device, fstrim_type, ...)
    :param env: test environment providing the main vm
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # boot the original vm only to record its disk list as a baseline
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Create a new vm for test, undefine it at last
    new_vm_name = "%s_discardtest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        test.error("Define new vm failed.")
    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
    except Exception as detail:
        test.error("Create new vm failed:%s" % detail)

    disk_type = params.get("disk_type", "file")
    # "/DEV/EXAMPLE" is a placeholder meaning "no real device given" —
    # in that case an iscsi device is created below
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            # file-backed disk: create a 1G raw image in the tmp dir
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            # block-backed disk: use the given device or set up iscsi
            if not discard_device.count("/DEV/EXAMPLE"):
                create_iscsi = False
            else:
                create_iscsi = True
                discard_device = create_iscsi_device(test)
            device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")
        # attach the disk with the discard attribute persistently
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(new_vm_name, xmlfile,
                            flagstr="--persistent", ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            # the guest agent needs a virtio channel to talk through
            channelfile = prepare_channel_xml(new_vm_name)
            virsh.attach_device(new_vm_name, channelfile,
                                flagstr="--persistent", ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(new_vm_name))

        # Verify attached device in vm
        if new_vm.is_dead():
            new_vm.start()
            new_vm.wait_for_login()
        af_disks = get_vm_disks(new_vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm (symmetric difference of disk lists)
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            test.fail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupy space of new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        # capacity is sampled three times: fresh, after filling, after fstrim
        bf_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(new_vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(test, disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(test, fstrim_type, new_vm, status_error)
        af_fstrim_cpy = get_disk_capacity(test, disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)

        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            # manual trim: capacity must shrink noticeably after fstrim
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                test.fail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            # discard mount option: filling should not grow usage significantly
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                test.fail("Automatic 'fstrims' didn't work.")
    finally:
        # always tear down the test vm and any block-device scaffolding
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            process.run("pvremove -f %s" % discard_device,
                        ignore_status=True, shell=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
def set_state(run_params, env):
    """
    Save cleanup states for vms with `set_state` parameter.

    :param run_params: configuration parameters
    :type run_params: {str, str}
    :param env: test environment
    :type env: Env object
    :raises: :py:class:`exceptions.TestAbortError` if unexpected/missing snapshot
             in passive mode (abort)
    :raises: :py:class:`exceptions.TestError` if invalid policy was used
    """
    for vm_name in run_params.objects("vms"):
        vm = env.get_vm(vm_name)
        vm_params = run_params.object_params(vm_name)
        # if the snapshot is not defined skip (leaf tests that are no setup)
        if not vm_params.get("set_state"):
            continue
        vm_params["set_type"] = vm_params.get("set_type", "any")
        # set_mode is a two-letter policy: [0] = action if the state exists
        # ('a'bort/'r'euse/'f'orce), [1] = action if it doesn't ('a'/'f')
        vm_params["set_mode"] = vm_params.get("set_mode", "ff")

        # probe for the state via check_state using the set_* parameters
        vm_params["vms"] = vm_name
        vm_params["check_type"] = vm_params["set_type"]
        vm_params["check_state"] = vm_params["set_state"]
        state_exists = check_state(vm_params, env)
        # if too many or no matches default to most performant type
        vm_params["set_type"] = vm_params["found_type_%s" % vm_name]
        # online/offline filter
        if vm_params["set_type"] in run_params.get("skip_types", []):
            logging.debug("Skip setting states of types %s" % ", ".join(run_params.objects("skip_types")))
            continue
        if vm_params["set_type"] == "offline":
            vm.destroy(gracefully=True)
        # NOTE: setting an online state assumes that the vm is online just like
        # setting an offline state assumes that the vm already exists

        action_if_exists = vm_params["set_mode"][0]
        action_if_doesnt_exist = vm_params["set_mode"][1]
        if state_exists and "a" == action_if_exists:
            logging.info("Aborting because of unwanted snapshot for later cleanup")
            raise exceptions.TestAbortError("Snapshot '%s' of %s already exists. Aborting "
                                            "due to passive mode." % (vm_params["set_state"], vm_name))
        elif state_exists and "r" == action_if_exists:
            logging.info("Keeping the already existing snapshot untouched")
        elif state_exists and "f" == action_if_exists:
            logging.info("Overwriting the already existing snapshot")
            # offline states must be removed before re-setting; online ones
            # are simply rewritten by _set_state below
            if vm_params["set_state"] in OFFLINE_ROOTS and vm_params["set_type"] == "offline":
                unset_root(vm_params)
            elif vm_params["set_type"] == "offline":
                vm_params["lv_snapshot_name"] = vm_params["set_state"]
                lv_utils.lv_remove(vm_params["vg_name"], vm_params["lv_snapshot_name"])
            else:
                logging.debug("Overwriting online snapshot simply by writing it again")
            _set_state(vm, vm_params)
        elif state_exists:
            raise exceptions.TestError("Invalid policy %s: The end action on present state can be "
                                       "either of 'abort', 'reuse', 'force'." % vm_params["set_mode"])
        elif not state_exists and "a" == action_if_doesnt_exist:
            logging.info("Aborting because of missing snapshot for later cleanup")
            raise exceptions.TestAbortError("Snapshot '%s' of %s doesn't exist. Aborting "
                                            "due to passive mode." % (vm_params["set_state"], vm_name))
        elif not state_exists and "f" == action_if_doesnt_exist:
            _set_state(vm, vm_params)
        elif not state_exists:
            raise exceptions.TestError("Invalid policy %s: The end action on missing state can be "
                                       "either of 'abort', 'force'." % vm_params["set_mode"])