def is_lv_deleted():
    """
    Tear down the test LV/VG and report whether the LV is really gone.

    NOTE(review): takes no ``self`` parameter, so this is presumably a
    nested helper closing over an enclosing method's ``self`` — confirm
    against the surrounding code.

    :return: True when the LV no longer shows up, False otherwise.
    """
    lv_utils.lv_remove(self.vgname, self.lvname)
    # Give the device-mapper a moment to settle before removing the VG.
    time.sleep(5)
    lv_utils.vg_remove(self.vgname)
    return not lv_utils.lv_check(self.vgname, self.lvname)
def pre_cleanup(self):
    """
    Clean up the disks and directories before the test starts on them.

    Wipes any filesystem left on the known devices, then removes stale
    LV/VG state and any leftover software RAID from a previous run.
    """
    # Fix: corrected misspelled log messages ("diretories", "existance").
    self.log.info("Pre_cleaning of disk and directories...")
    disk_list = ['/dev/mapper/avocado_vg-avocado_lv', self.raid_name,
                 self.disk]
    for disk in disk_list:
        self.delete_fs(disk)
    self.log.info("checking ...lv/vg existence...")
    if lv_utils.lv_check(self.vgname, self.lvname):
        self.log.info("found lv existence... deleting it")
        # delete_lv removes both the LV and its VG.
        self.delete_lv()
    elif lv_utils.vg_check(self.vgname):
        self.log.info("found vg existence ... deleting it")
        lv_utils.vg_remove(self.vgname)
    else:
        self.log.info("No VG/LV detected")
    self.log.info("checking for sraid existence...")
    if self.sraid.exists():
        self.log.info("found sraid existence... deleting it")
        self.delete_raid()
    else:
        self.log.info("No softwareraid detected ")
    self.log.info("\n End of pre_cleanup")
def delete_lv(self):
    """
    Clear all PV, VG, LV and snapshots created by the test.

    Each removal is guarded by an existence check, so calling this
    repeatedly (or on a clean system) is harmless.
    """
    vg, lv = self.vg_name, self.lv_name
    if lv_utils.lv_check(vg, lv):
        lv_utils.lv_remove(vg, lv)
    if lv_utils.vg_check(vg):
        lv_utils.vg_remove(vg)
def delete_lv(self):
    """
    Remove the test LV and its VG.

    Fix: the original removed the LV/VG unconditionally, which raises
    when they no longer exist (e.g. cleanup running twice, or a failed
    setup). Guard each removal with an existence check, matching the
    guarded ``delete_lv`` variant used elsewhere in this codebase, so
    cleanup is idempotent.
    """
    vgname = 'avocado_vg'
    lvname = 'avocado_lv'
    if lv_utils.lv_check(vgname, lvname):
        lv_utils.lv_remove(vgname, lvname)
    if lv_utils.vg_check(vgname):
        lv_utils.vg_remove(vgname)
def tearDown(self):
    """Release the temporary directory and drop every VG this test made."""
    self.tmpdir.cleanup()
    for name in self.vgs:
        lv_utils.vg_remove(name)
def tearDown(self):
    """Delete the scratch directory tree and drop every VG this test made."""
    shutil.rmtree(self.tmpdir)
    for name in self.vgs:
        lv_utils.vg_remove(name)
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported after special libvirt version.)
    These are test cases for it:
    """
    # Make sure the base VM is running so its disk list can be collected.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    # Disks present BEFORE attaching the discard device; compared later
    # to identify the newly attached disk inside the guest.
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Create a new vm for test, undefine it at last
    new_vm_name = "%s_discardtest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        test.error("Define new vm failed.")
    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
    except Exception as detail:
        test.error("Create new vm failed:%s" % detail)

    disk_type = params.get("disk_type", "file")
    # "/DEV/EXAMPLE" is a placeholder meaning "no real device supplied":
    # an iSCSI-backed device will be created on demand in that case.
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            # File-backed disk: build a raw 1G image in the tmp dir.
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            # Block-backed disk: use the supplied device, or create an
            # iSCSI device when only the placeholder was given.
            if not discard_device.count("/DEV/EXAMPLE"):
                create_iscsi = False
            else:
                create_iscsi = True
                discard_device = create_iscsi_device(test)
            device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")

        # Attach the disk (and, for guest-agent trims, the agent channel)
        # persistently before booting the new VM.
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(new_vm_name, xmlfile,
                            flagstr="--persistent", ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            channelfile = prepare_channel_xml(new_vm_name)
            virsh.attach_device(new_vm_name, channelfile,
                                flagstr="--persistent", ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(new_vm_name))

        # Verify attached device in vm
        if new_vm.is_dead():
            new_vm.start()
            new_vm.wait_for_login()
        af_disks = get_vm_disks(new_vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm
        # Symmetric difference isolates the disk that appeared; empty
        # string means the attach was not visible in the guest.
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            test.fail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupt space of new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        # Capacity is sampled three times: pristine, after writing 500M,
        # and after the trim — the deltas prove whether fstrim worked.
        bf_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(new_vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(test, disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(test, fstrim_type, new_vm, status_error)
        af_fstrim_cpy = get_disk_capacity(test, disk_type,
                                          imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            # Manual trim: capacity must shrink significantly after trim.
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                test.fail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            # Automatic trim via mount -o discard: writing should not
            # significantly grow the backing store.
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                test.fail("Automatic 'fstrims' didn't work.")
    finally:
        # Always tear down the throwaway VM and, for block disks, the
        # thin LV/VG/PV and any iSCSI device created above. Cleanup
        # errors are logged, not raised, so one failure doesn't mask
        # the rest of the teardown.
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            process.run("pvremove -f %s" % discard_device,
                        ignore_status=True, shell=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)