def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active
def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    lv = lvm.getLV(vg_name, lv_any)
    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == 1024**3
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV is typically created on dev1, but may be allocated on either device.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if lv exists must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath, initialSize=None):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    lvSize = cls.calculate_volume_alloc_size(preallocate, size, initialSize)

    lvm.createLV(dom.sdUUID, volUUID, lvSize, activate=True,
                 initialTags=(sc.TAG_VOL_UNINIT,))

    fileutils.rm_file(volPath)
    lvPath = lvm.lvPath(dom.sdUUID, volUUID)
    cls.log.info("Creating volume symlink from %r to %r", lvPath, volPath)
    os.symlink(lvPath, volPath)

    if not volParent:
        cls.log.info("Request to create %s volume %s with size = %s "
                     "blocks", sc.type2name(volFormat), volPath, size)
        if volFormat == sc.COW_FORMAT:
            operation = qemuimg.create(volPath,
                                       size=size * BLOCK_SIZE,
                                       format=sc.fmt2str(volFormat),
                                       qcow2Compat=dom.qcow2_compat())
            operation.run()
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s "
                     "with size %s (blocks)",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID, size)
        volParent.clone(volPath, volFormat, size)

    with dom.acquireVolumeMetadataSlot(volUUID) as slot:
        mdTags = ["%s%s" % (sc.TAG_PREFIX_MD, slot),
                  "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVTags(dom.sdUUID, volUUID,
                         delTags=[sc.TAG_VOL_UNINIT],
                         addTags=mdTags)

    try:
        lvm.deactivateLVs(dom.sdUUID, [volUUID])
    except se.CannotDeactivateLogicalVolume:
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)

    return (dom.sdUUID, slot)
def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * 1024**3

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

        # Unused lvs should not be active.
        for vg_name in vgs:
            lv = lvm.getLV(vg_name, "unused")
            assert not lv.active
def test_lv_rename(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name
def _create_lv_artifact(self, parent, lv_size):
    try:
        lv = lvm.getLV(self.sd_manifest.sdUUID, self.vol_id)
    except se.LogicalVolumeDoesNotExistError:
        pass
    else:
        if sc.TEMP_VOL_LVTAG in lv.tags:
            raise se.DomainHasGarbage("Logical volume artifact %s exists" %
                                      self.vol_id)
        else:
            raise se.VolumeAlreadyExists("Logical volume %s exists" %
                                         self.vol_id)

    parent_vol_id = parent.vol_id if parent else sc.BLANK_UUID
    tags = (sc.TEMP_VOL_LVTAG,
            sc.TAG_PREFIX_PARENT + parent_vol_id,
            sc.TAG_PREFIX_IMAGE + self.img_id)
    lvm.createLV(self.sd_manifest.sdUUID, self.vol_id, lv_size,
                 activate=True, initialTags=tags)
def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.changeLVTags(
        vg_name,
        lv_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lv = lvm.getLV(vg_name, lv_name)
    assert sorted(lv.tags) == ["new-tag-1", "new-tag-2"]
def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * 1024**3
def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * 1024**3
def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * GiB
def test_vg_invalidate_lvs_pvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() first finds the VG using getVG(), so there is a cache hit.
    # No stale PVs for the VG so getPVs() will have another cache hit.
    check_stats(hits=2, misses=0)

    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs and PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._pvs == {dev: lvm.Stale(dev)}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() will not find the invalidated VG in cache, so there is a miss.
    # There are stale PVs for the VG so getPVs() will have another cache miss.
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}
def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv1_name = str(uuid.uuid4())
    lv2_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv1_name, 1024, activate=False)
    lvm.createLV(vg_name, lv2_name, 1024, activate=False)

    lvm.changeLVsTags(
        vg_name,
        (lv1_name, lv2_name),
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lv1 = lvm.getLV(vg_name, lv1_name)
    lv2 = lvm.getLV(vg_name, lv2_name)
    assert sorted(lv1.tags) == ["new-tag-1", "new-tag-2"]
    assert sorted(lv2.tags) == ["new-tag-1", "new-tag-2"]
def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)

    clear_stats()
    lv = lvm.getLV(vg_name)[0]
    check_stats(hits=0, misses=1)

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    clear_stats()
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=1)
def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending LV to the same size does nothing.
    lvm.extendLV(vg_name, lv_name, 2048)
    lvm.invalidateVG(vg_name)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending LV to a smaller size does nothing.
    lvm.extendLV(vg_name, lv_name, 1024)
    lvm.invalidateVG(vg_name)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * GiB
def _create(cls, dom, imgUUID, volUUID, capacity, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath, initial_size=None):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    lv_size = cls.calculate_volume_alloc_size(
        preallocate, volFormat, capacity, initial_size)
    lv_size_mb = utils.round(lv_size, MiB) // MiB

    lvm.createLV(dom.sdUUID, volUUID, lv_size_mb, activate=True,
                 initialTags=(sc.TAG_VOL_UNINIT,))

    fileutils.rm_file(volPath)
    lvPath = lvm.lvPath(dom.sdUUID, volUUID)
    cls.log.info("Creating volume symlink from %r to %r", lvPath, volPath)
    os.symlink(lvPath, volPath)

    if not volParent:
        cls.log.info("Request to create %s volume %s with capacity = %s",
                     sc.type2name(volFormat), volPath, capacity)
        if volFormat == sc.COW_FORMAT:
            operation = qemuimg.create(volPath,
                                       size=capacity,
                                       format=sc.fmt2str(volFormat),
                                       qcow2Compat=dom.qcow2_compat())
            operation.run()
    else:
        # Create hardlink to template and its meta file
        cls.log.info(
            "Request to create snapshot %s/%s of volume %s/%s "
            "with capacity %s",
            imgUUID, volUUID, srcImgUUID, srcVolUUID, capacity)
        volParent.clone(volPath, volFormat, capacity)

    with dom.acquireVolumeMetadataSlot(volUUID) as slot:
        mdTags = ["%s%s" % (sc.TAG_PREFIX_MD, slot),
                  "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVsTags(
            dom.sdUUID,
            (volUUID,),
            delTags=[sc.TAG_VOL_UNINIT],
            addTags=mdTags)

    try:
        lvm.deactivateLVs(dom.sdUUID, [volUUID])
    except se.CannotDeactivateLogicalVolume:
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)

    return (dom.sdUUID, slot)
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath, initialSize=None):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    lvSize = cls.calculate_volume_alloc_size(preallocate, size, initialSize)

    lvm.createLV(dom.sdUUID, volUUID, "%s" % lvSize, activate=True,
                 initialTags=(sc.TAG_VOL_UNINIT,))

    fileutils.rm_file(volPath)
    os.symlink(lvm.lvPath(dom.sdUUID, volUUID), volPath)

    if not volParent:
        cls.log.info("Request to create %s volume %s with size = %s "
                     "sectors", sc.type2name(volFormat), volPath, size)
        if volFormat == sc.COW_FORMAT:
            qemuimg.create(volPath,
                           size=size * BLOCK_SIZE,
                           format=sc.fmt2str(volFormat),
                           qcow2Compat=dom.qcow2_compat())
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID)
        volParent.clone(volPath, volFormat)

    with dom.acquireVolumeMetadataSlot(volUUID,
                                       sc.VOLUME_MDNUMBLKS) as slot:
        mdTags = ["%s%s" % (sc.TAG_PREFIX_MD, slot),
                  "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVTags(dom.sdUUID, volUUID,
                         delTags=[sc.TAG_VOL_UNINIT],
                         addTags=mdTags)

    try:
        lvm.deactivateLVs(dom.sdUUID, [volUUID])
    except se.CannotDeactivateLogicalVolume:
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)

    return (dom.sdUUID, slot)