def test_lv_create_remove(tmp_storage, read_only):
    """
    Verify LV create/get/remove on a VG spanning two devices, in both
    read-only and read-write modes, including lvm cache hit/miss accounting.
    """
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    clear_stats()
    lv = lvm.getLV(vg_name, lv_any)
    check_stats(hits=0, misses=1)

    # Calling getLV() again will have a cache hit.
    lvm.getLV(vg_name, lv_any)
    check_stats(hits=1, misses=1)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == GiB
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    # BUG FIX: the original `assert device in dev1, dev2` parsed as
    # `assert (device in dev1), dev2` - a substring check with dev2 used as
    # the assertion message. The intent is membership in either device.
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if an lv exists must work in both read-only and read-write.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        clear_stats()
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
        check_stats(hits=0, misses=1)
def test_lv_create_remove(tmp_storage, read_only):
    """
    Verify LV create/get/remove on a VG spanning two devices, in both
    read-only and read-write modes.
    """
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    lv = lvm.getLV(vg_name, lv_any)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == 1024**3
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    # BUG FIX: the original `assert device in dev1, dev2` parsed as
    # `assert (device in dev1), dev2` - a substring check with dev2 used as
    # the assertion message. The intent is membership in either device.
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if an lv exists must work in both read-only and read-write.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
def halfbakedVolumeRollback(cls, taskObj, sdUUID, volUUID, volPath):
    """
    Roll back a partially created volume: if the LV still carries the
    "uninitialized" tag, remove it, then unlink the volume path if present.
    """
    cls.log.info("sdUUID=%s volUUID=%s volPath=%s" %
                 (sdUUID, volUUID, volPath))

    lv_exists = True
    try:
        # Fix me: assert resource lock.
        tags = lvm.getLV(sdUUID, volUUID).tags
    except se.LogicalVolumeDoesNotExistError:
        # The LV was never created - nothing to remove.
        lv_exists = False

    # Only an LV still marked uninitialized is safe to remove here.
    if lv_exists and sc.TAG_VOL_UNINIT in tags:
        try:
            lvm.removeLVs(sdUUID, (volUUID, ))
        except se.LogicalVolumeRemoveError as e:
            # Best effort - log and continue with the link cleanup.
            cls.log.warning("Cannot remove logical volume: %s", e)

    if os.path.lexists(volPath):
        cls.log.info("Unlinking half baked volume: %s", volPath)
        os.unlink(volPath)
def halfbakedVolumeRollback(cls, taskObj, sdUUID, volUUID, volPath):
    """
    Roll back a partially created volume: if the LV still carries the
    "uninitialized" tag, remove it, then unlink the volume path if present.
    """
    cls.log.info("sdUUID=%s volUUID=%s volPath=%s" %
                 (sdUUID, volUUID, volPath))

    lv_exists = True
    try:
        # Fix me: assert resource lock.
        tags = lvm.getLV(sdUUID, volUUID).tags
    except se.LogicalVolumeDoesNotExistError:
        # The LV was never created - nothing to remove.
        lv_exists = False

    # Only an LV still marked uninitialized is safe to remove here.
    if lv_exists and sc.TAG_VOL_UNINIT in tags:
        try:
            lvm.removeLVs(sdUUID, volUUID)
        except se.CannotRemoveLogicalVolume as e:
            # Best effort - log and continue with the link cleanup.
            cls.log.warning("Remove logical volume failed %s/%s %s",
                            sdUUID, volUUID, str(e))

    if os.path.lexists(volPath):
        cls.log.info("Unlinking half baked volume: %s", volPath)
        os.unlink(volPath)
def delete(self, postZero, force, discard):
    """
    Delete volume.

    'postZero' - zeroing file before deletion
    'force' is required to remove shared and internal volumes
    'discard' - discard lv before deletion

    Best-effort: later cleanup steps run even if earlier ones fail; the
    first captured failure is re-raised at the end via eFound.
    """
    self.log.info("Request to delete LV %s of image %s in VG %s ",
                  self.volUUID, self.imgUUID, self.sdUUID)

    vol_path = self.getVolumePath()

    # On block storage domains we store a volume's parent UUID in two
    # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
    # to the volume LV itself. The LV tag is more efficient to access
    # than the domain metadata but it may only be updated by the SPM.
    #
    # This means that after a live merge completes the domain metadata LV
    # will be updated but the LV tag will not. We can detect this case
    # here and fix the LV tag since this is an SPM verb.
    #
    # File domains do not have this complexity because the metadata is
    # stored in only one place and that metadata is updated by the HSM
    # host when the live merge finishes.
    sync = False
    for childID in self.getChildren():
        child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                            childID)
        metaParent = child.getParentMeta()
        tagParent = child.getParentTag()
        if metaParent != tagParent:
            self.log.debug(
                "Updating stale PUUID LV tag from %s to %s for "
                "volume %s", tagParent, metaParent, child.volUUID)
            child.setParentTag(metaParent)
            sync = True
    if sync:
        # A child's parent pointer changed; re-evaluate leaf status.
        self.recheckIfLeaf()

    if not force:
        self.validateDelete()

    # Mark volume as illegal before deleting
    self.setLegality(sc.ILLEGAL_VOL)

    if postZero or discard:
        # Zero/discard need the LV activated read-write; always tear it
        # down again, even on failure.
        self.prepare(justme=True, rw=True, chainrw=force, setrw=True,
                     force=True)
        try:
            if postZero:
                blockdev.zero(vol_path, task=vars.task)
            if discard:
                blockdev.discard(vol_path)
        finally:
            self.teardown(self.sdUUID, self.volUUID, justme=True)

    # try to cleanup as much as possible
    # eFound holds the error raised at the end if any best-effort step
    # below fails; it starts as a generic CannotDeleteVolume.
    eFound = se.CannotDeleteVolume(self.volUUID)
    puuid = None
    try:
        # We need to blank parent record in our metadata
        # for parent to become leaf successfully.
        puuid = self.getParent()
        self.setParent(sc.BLANK_UUID)
        if puuid and puuid != sc.BLANK_UUID:
            pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                               puuid)
            pvol.recheckIfLeaf()
    except Exception as e:
        eFound = e
        self.log.warning("cannot finalize parent volume %s", puuid,
                         exc_info=True)

    # Basically, we want to mark the volume _remove_me at the beginning of
    # the delete; however, with the current delete logic, if marking the
    # volume fails, and the deleted volume is a leaf, we end up with a
    # chain with a valid leaf volume.
    # The ultimate solution of volume deletion requires changes in
    # image.syncVolumeChain to disconnect the volume from the chain,
    # and probably there mark it as _remove_me.
    manifest = sdCache.produce_manifest(self.sdUUID)
    manifest.markForDelVols(self.sdUUID, self.imgUUID, [self.volUUID],
                            sc.REMOVED_IMAGE_PREFIX)

    try:
        lvm.removeLVs(self.sdUUID, (self.volUUID, ))
    except se.CannotRemoveLogicalVolume as e:
        # NOTE(review): a failed LV removal is logged but does not update
        # eFound; deletion still proceeds to unlink the link path below.
        self.log.exception(
            "Failed to delete volume %s/%s. The "
            "logical volume must be removed manually.",
            self.sdUUID, self.volUUID)

    try:
        self.log.info("Unlinking %s", vol_path)
        os.unlink(vol_path)
        # Success path: the link is gone, report deletion succeeded.
        return True
    except Exception as e:
        eFound = e
        self.log.error("cannot delete volume's %s/%s link path: %s",
                       self.sdUUID, self.volUUID, vol_path,
                       exc_info=True)

    # Reached only when unlinking failed; raise the first captured error.
    raise eFound
def delete(self, postZero, force, discard):
    """
    Delete volume.

    'postZero' - zeroing file before deletion
    'force' is required to remove shared and internal volumes
    'discard' - discard lv before deletion

    Best-effort: later cleanup steps run even if earlier ones fail; the
    first captured failure is re-raised at the end via eFound.
    """
    self.log.info("Request to delete LV %s of image %s in VG %s ",
                  self.volUUID, self.imgUUID, self.sdUUID)

    vol_path = self.getVolumePath()
    # Metadata offset is read up front so metadata can be removed even
    # after the LV itself is gone.
    offs = self.getMetaOffset()

    # On block storage domains we store a volume's parent UUID in two
    # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
    # to the volume LV itself. The LV tag is more efficient to access
    # than the domain metadata but it may only be updated by the SPM.
    #
    # This means that after a live merge completes the domain metadata LV
    # will be updated but the LV tag will not. We can detect this case
    # here and fix the LV tag since this is an SPM verb.
    #
    # File domains do not have this complexity because the metadata is
    # stored in only one place and that metadata is updated by the HSM
    # host when the live merge finishes.
    sync = False
    for childID in self.getChildren():
        child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                            childID)
        metaParent = child.getParentMeta()
        tagParent = child.getParentTag()
        if metaParent != tagParent:
            self.log.debug("Updating stale PUUID LV tag from %s to %s for "
                           "volume %s", tagParent, metaParent,
                           child.volUUID)
            child.setParentTag(metaParent)
            sync = True
    if sync:
        # A child's parent pointer changed; re-evaluate leaf status.
        self.recheckIfLeaf()

    if not force:
        self.validateDelete()

    # Mark volume as illegal before deleting
    self.setLegality(sc.ILLEGAL_VOL)

    if postZero or discard:
        # Zero/discard need the LV activated read-write; always tear it
        # down again, even on failure.
        self.prepare(justme=True, rw=True, chainrw=force, setrw=True,
                     force=True)
        try:
            if postZero:
                blockdev.zero(vol_path, task=vars.task)
            if discard:
                blockdev.discard(vol_path)
        finally:
            self.teardown(self.sdUUID, self.volUUID, justme=True)

    # try to cleanup as much as possible
    # eFound holds the error raised at the end if any best-effort step
    # below fails; it starts as a generic CannotDeleteVolume.
    eFound = se.CannotDeleteVolume(self.volUUID)
    puuid = None
    try:
        # We need to blank parent record in our metadata
        # for parent to become leaf successfully.
        puuid = self.getParent()
        self.setParent(sc.BLANK_UUID)
        if puuid and puuid != sc.BLANK_UUID:
            pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                               puuid)
            pvol.recheckIfLeaf()
    except Exception as e:
        eFound = e
        self.log.warning("cannot finalize parent volume %s", puuid,
                         exc_info=True)

    try:
        try:
            lvm.removeLVs(self.sdUUID, self.volUUID)
        except se.CannotRemoveLogicalVolume:
            # At this point LV is already marked as illegal, we will
            # try to cleanup whatever we can...
            pass
        # NOTE(review): metadata is removed even when the LV removal
        # above failed - the LV is illegal at this point, so its metadata
        # is considered disposable.
        self.removeMetadata([self.sdUUID, offs])
    except Exception as e:
        eFound = e
        self.log.error("cannot remove volume %s/%s", self.sdUUID,
                       self.volUUID, exc_info=True)

    try:
        self.log.debug("Unlinking %s", vol_path)
        os.unlink(vol_path)
        # Success path: the link is gone, report deletion succeeded.
        return True
    except Exception as e:
        eFound = e
        self.log.error("cannot delete volume's %s/%s link path: %s",
                       self.sdUUID, self.volUUID, vol_path,
                       exc_info=True)

    # Reached only when unlinking failed; raise the first captured error.
    raise eFound
def delete(self, postZero, force, discard):
    """
    Delete volume.

    'postZero' - zeroing file before deletion
    'force' is required to remove shared and internal volumes
    'discard' - discard lv before deletion

    Best-effort: later cleanup steps run even if earlier ones fail; the
    first captured failure is re-raised at the end via eFound.
    """
    self.log.info("Request to delete LV %s of image %s in VG %s ",
                  self.volUUID, self.imgUUID, self.sdUUID)

    vol_path = self.getVolumePath()
    # Metadata slot is read up front so metadata can be removed even
    # after the LV itself is gone.
    slot = self.getMetaSlot()

    # On block storage domains we store a volume's parent UUID in two
    # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
    # to the volume LV itself. The LV tag is more efficient to access
    # than the domain metadata but it may only be updated by the SPM.
    #
    # This means that after a live merge completes the domain metadata LV
    # will be updated but the LV tag will not. We can detect this case
    # here and fix the LV tag since this is an SPM verb.
    #
    # File domains do not have this complexity because the metadata is
    # stored in only one place and that metadata is updated by the HSM
    # host when the live merge finishes.
    sync = False
    for childID in self.getChildren():
        child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                            childID)
        metaParent = child.getParentMeta()
        tagParent = child.getParentTag()
        if metaParent != tagParent:
            self.log.debug("Updating stale PUUID LV tag from %s to %s for "
                           "volume %s", tagParent, metaParent,
                           child.volUUID)
            child.setParentTag(metaParent)
            sync = True
    if sync:
        # A child's parent pointer changed; re-evaluate leaf status.
        self.recheckIfLeaf()

    if not force:
        self.validateDelete()

    # Mark volume as illegal before deleting
    self.setLegality(sc.ILLEGAL_VOL)

    if postZero or discard:
        # Zero/discard need the LV activated read-write; always tear it
        # down again, even on failure.
        self.prepare(justme=True, rw=True, chainrw=force, setrw=True,
                     force=True)
        try:
            if postZero:
                blockdev.zero(vol_path, task=vars.task)
            if discard:
                blockdev.discard(vol_path)
        finally:
            self.teardown(self.sdUUID, self.volUUID, justme=True)

    # try to cleanup as much as possible
    # eFound holds the error raised at the end if any best-effort step
    # below fails; it starts as a generic CannotDeleteVolume.
    eFound = se.CannotDeleteVolume(self.volUUID)
    puuid = None
    try:
        # We need to blank parent record in our metadata
        # for parent to become leaf successfully.
        puuid = self.getParent()
        self.setParent(sc.BLANK_UUID)
        if puuid and puuid != sc.BLANK_UUID:
            pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                               puuid)
            pvol.recheckIfLeaf()
    except Exception as e:
        eFound = e
        self.log.warning("cannot finalize parent volume %s", puuid,
                         exc_info=True)

    # Basically, we want to mark the volume _remove_me at the beginning of
    # the delete; however, with the current delete logic, if marking the
    # volume fails, and the deleted volume is a leaf, we end up with a
    # chain with a valid leaf volume.
    # The ultimate solution of volume deletion requires changes in
    # image.syncVolumeChain to disconnect the volume from the chain,
    # and probably there mark it as _remove_me.
    manifest = sdCache.produce_manifest(self.sdUUID)
    manifest.markForDelVols(self.sdUUID, self.imgUUID, [self.volUUID],
                            sc.REMOVED_IMAGE_PREFIX)

    try:
        lvm.removeLVs(self.sdUUID, self.volUUID)
    except se.CannotRemoveLogicalVolume as e:
        # NOTE(review): a failed LV removal is logged but does not update
        # eFound; deletion still proceeds to unlink the link path below.
        self.log.exception("Failed to delete volume %s/%s. The "
                           "logical volume must be removed manually.",
                           self.sdUUID, self.volUUID)
    else:
        # If removing the LV fails, we don't want to remove the
        # metadata. As the volume still exists on the storage, and is
        # accessible, removing the metadata will cause unexpected
        # errors when accessing the metadata that was wiped. This is a
        # minimal solution for: https://bugzilla.redhat.com/1574631
        try:
            self.removeMetadata([self.sdUUID, slot])
        except se.VolumeMetadataWriteError as e:
            eFound = e
            self.log.exception("Failed to delete volume %s/%s metadata.",
                               self.sdUUID, self.volUUID)

    try:
        self.log.info("Unlinking %s", vol_path)
        os.unlink(vol_path)
        # Success path: the link is gone, report deletion succeeded.
        return True
    except Exception as e:
        eFound = e
        self.log.error("cannot delete volume's %s/%s link path: %s",
                       self.sdUUID, self.volUUID, vol_path,
                       exc_info=True)

    # Reached only when unlinking failed; raise the first captured error.
    raise eFound
def delete(self, postZero, force, discard):
    """
    Delete volume.

    'postZero' - zeroing file before deletion
    'force' is required to remove shared and internal volumes
    'discard' - discard lv before deletion

    Best-effort: later cleanup steps run even if earlier ones fail; the
    first captured failure is re-raised at the end via eFound.
    """
    self.log.info("Request to delete LV %s of image %s in VG %s ",
                  self.volUUID, self.imgUUID, self.sdUUID)

    vol_path = self.getVolumePath()
    # Metadata offset is read up front so metadata can be removed even
    # after the LV itself is gone.
    offs = self.getMetaOffset()

    # On block storage domains we store a volume's parent UUID in two
    # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
    # to the volume LV itself. The LV tag is more efficient to access
    # than the domain metadata but it may only be updated by the SPM.
    #
    # This means that after a live merge completes the domain metadata LV
    # will be updated but the LV tag will not. We can detect this case
    # here and fix the LV tag since this is an SPM verb.
    #
    # File domains do not have this complexity because the metadata is
    # stored in only one place and that metadata is updated by the HSM
    # host when the live merge finishes.
    sync = False
    for childID in self.getChildren():
        child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                            childID)
        metaParent = child.getParentMeta()
        tagParent = child.getParentTag()
        if metaParent != tagParent:
            self.log.debug("Updating stale PUUID LV tag from %s to %s for "
                           "volume %s", tagParent, metaParent,
                           child.volUUID)
            child.setParentTag(metaParent)
            sync = True
    if sync:
        # A child's parent pointer changed; re-evaluate leaf status.
        self.recheckIfLeaf()

    if not force:
        self.validateDelete()

    # Mark volume as illegal before deleting
    self.setLegality(sc.ILLEGAL_VOL)

    # Discard may also be forced by domain-wide configuration, not only
    # by the caller's 'discard' argument.
    discard_needed = discard or blockdev.discard_enabled()
    if postZero or discard_needed:
        # Zero/discard need the LV activated read-write; always tear it
        # down again, even on failure.
        self.prepare(justme=True, rw=True, chainrw=force, setrw=True,
                     force=True)
        try:
            if postZero:
                blockdev.zero(vol_path, task=vars.task)
            if discard_needed:
                blockdev.discard(vol_path)
        finally:
            self.teardown(self.sdUUID, self.volUUID, justme=True)

    # try to cleanup as much as possible
    # eFound holds the error raised at the end if any best-effort step
    # below fails; it starts as a generic CannotDeleteVolume.
    eFound = se.CannotDeleteVolume(self.volUUID)
    puuid = None
    try:
        # We need to blank parent record in our metadata
        # for parent to become leaf successfully.
        puuid = self.getParent()
        self.setParent(sc.BLANK_UUID)
        if puuid and puuid != sc.BLANK_UUID:
            pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                               puuid)
            pvol.recheckIfLeaf()
    except Exception as e:
        eFound = e
        self.log.warning("cannot finalize parent volume %s", puuid,
                         exc_info=True)

    try:
        try:
            lvm.removeLVs(self.sdUUID, self.volUUID)
        except se.CannotRemoveLogicalVolume:
            # At this point LV is already marked as illegal, we will
            # try to cleanup whatever we can...
            pass
        # NOTE(review): metadata is removed even when the LV removal
        # above failed - the LV is illegal at this point, so its metadata
        # is considered disposable.
        self.removeMetadata([self.sdUUID, offs])
    except Exception as e:
        eFound = e
        self.log.error("cannot remove volume %s/%s", self.sdUUID,
                       self.volUUID, exc_info=True)

    try:
        self.log.debug("Unlinking %s", vol_path)
        os.unlink(vol_path)
        # Success path: the link is gone, report deletion succeeded.
        return True
    except Exception as e:
        eFound = e
        self.log.error("cannot delete volume's %s/%s link path: %s",
                       self.sdUUID, self.volUUID, vol_path,
                       exc_info=True)

    # Reached only when unlinking failed; raise the first captured error.
    raise eFound