def syncVolumeChain(self, sdUUID, imgUUID, volUUID, actualChain):
    """
    Fix volume metadata to reflect the given actual chain.

    This function is used to correct the volume chain linkage after a live
    merge.

    Arguments:
        sdUUID: storage domain containing the image
        imgUUID: image whose chain is being corrected
        volUUID: volume used as the chain entry point for getChain()
        actualChain: iterable of volume UUIDs that form the real chain
    """
    curChain = self.getChain(sdUUID, imgUUID, volUUID)
    log_str = logutils.volume_chain_to_str(vol.volUUID for vol in curChain)
    self.log.info("Current chain=%s ", log_str)

    # Collect the contiguous run of volumes that appear in the current
    # chain but not in the actual chain; these must be unlinked.
    # Inserting at index 0 keeps subChain ordered parent -> child.
    subChain = []
    for vol in curChain:
        if vol.volUUID not in actualChain:
            subChain.insert(0, vol.volUUID)
        elif len(subChain) > 0:
            break

    # Chains already match; nothing to fix.
    if len(subChain) == 0:
        return

    self.log.info("Unlinking subchain: %s", subChain)
    sdDom = sdCache.produce(sdUUID=sdUUID)
    # Parent of the oldest removed volume - children of the removed
    # sub-chain will be re-linked to it.
    dstParent = sdDom.produceVolume(imgUUID, subChain[0]).getParent()
    subChainTailVol = sdDom.produceVolume(imgUUID, subChain[-1])
    if subChainTailVol.isLeaf():
        # Removing the active layer: it cannot be relinked, so mark it
        # ILLEGAL instead, preventing it from being used again.
        self.log.info(
            "Leaf volume %s is being removed from the chain. "
            "Marking it ILLEGAL to prevent data corruption",
            subChainTailVol.volUUID)
        subChainTailVol.setLegality(sc.ILLEGAL_VOL)
    else:
        # Removing internal volume(s): point every child of the removed
        # sub-chain at the surviving parent.
        for childID in subChainTailVol.getChildren():
            self.log.info("Setting parent of volume %s to %s",
                          childID, dstParent)
            sdDom.produceVolume(imgUUID, childID). \
                setParentMeta(dstParent)
def estimate_qcow2_size(self, src_vol_params, dst_sd_id):
    """
    Calculate volume allocation size for converting raw/qcow2
    source volume to qcow2 volume on destination storage domain.

    Arguments:
        src_vol_params(dict): Dictionary returned from
                              `storage.volume.Volume.getVolumeParams()`
        dst_sd_id(str) : Destination volume storage domain id

    Returns:
        Volume allocation in bytes
    """
    # measure required size.
    qemu_measure = qemuimg.measure(
        image=src_vol_params['path'],
        format=sc.fmt2str(src_vol_params['volFormat']),
        output_format=qemuimg.FORMAT.QCOW2)

    # Adds extra room so we don't have to extend this disk immediately
    # when a vm is started.
    chunk_size_mb = config.getint("irs", "volume_utilization_chunk_mb")
    chunk_size = chunk_size_mb * MiB
    required = (qemu_measure["required"] + chunk_size)

    # Limit estimates size by maximum size.
    vol_class = sdCache.produce(dst_sd_id).getVolumeClass()
    max_size = vol_class.max_size(src_vol_params['capacity'],
                                  sc.COW_FORMAT)
    allocation = min(required, max_size)

    # Return estimated size of allocation.
    # Fix: the original format string was built from two adjacent literals
    # ("...volume:" "%d") rendering "volume:123"; add the missing space.
    self.log.debug("Estimated allocation for qcow2 volume: %d", allocation)
    return allocation
def _extend_base_allocation(base_vol, top_vol):
    """
    Extend the base volume allocation before merging top into base.

    Only relevant for qcow2 volumes on block storage; file-based and raw
    volumes do not need an explicit extend.
    """
    if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT):
        return

    # Measure the subchain from top to base. This gives us the required
    # allocation for merging top into base.
    log.debug("Measuring sub chain top=%r base=%r",
              top_vol.volUUID, base_vol.volUUID)
    measure = qemuimg.measure(
        top_vol.getVolumePath(),
        format=qemuimg.FORMAT.QCOW2,
        output_format=qemuimg.FORMAT.QCOW2,
        is_block=True,
        base=base_vol.getVolumePath())
    log.debug("Measure result: %s", measure)

    # When merging we always copy the bitmaps from the top to base. Measure
    # gives us the size of the bitmaps in top *and* base, so this may allocate
    # more than needed, but bitmaps are small so it should be good enough.
    required_size = measure["required"] + measure.get("bitmaps", 0)

    # If the top volume is leaf, the base volume will become leaf after the
    # merge, so it needs more space.
    optimal_size = base_vol.optimal_cow_size(
        required_size, base_vol.getCapacity(), top_vol.isLeaf())

    # Extend the volume. extendVolume() takes the new size in MiB.
    dom = sdCache.produce(base_vol.sdUUID)
    dom.extendVolume(base_vol.volUUID, optimal_size // MiB)
def _shrink_base_volume(subchain, optimal_size):
    """Reduce the base volume of *subchain* to *optimal_size* bytes.

    A real Volume must be produced here: subchain.base_vol is only a
    VolumeManifest, and reduce() is implemented on the Volume class.
    """
    domain = sdCache.produce(subchain.sd_id)
    volume = domain.produceVolume(subchain.img_id, subchain.base_id)
    volume.reduce(optimal_size // sc.BLOCK_SIZE)
def move(self, srcSdUUID, dstSdUUID, imgUUID, vmUUID, op, postZero, force,
         discard):
    """
    Move/Copy image between storage domains within same storage pool

    Arguments:
        op: MOVE_OP or COPY_OP (see OP_TYPES) - for MOVE_OP the source
            image is deleted after a successful copy.
        force: overwrite an existing destination image; also forced
            automatically when the destination image is illegal/fake.
    """
    self.log.info(
        "srcSdUUID=%s dstSdUUID=%s imgUUID=%s vmUUID=%s op=%s "
        "force=%s postZero=%s discard=%s", srcSdUUID, dstSdUUID,
        imgUUID, vmUUID, OP_TYPES[op], str(force), str(postZero),
        discard)

    destDom = sdCache.produce(dstSdUUID)
    # If image already exists check whether it illegal/fake, overwrite it
    if not self.isLegal(destDom.sdUUID, imgUUID):
        force = True
    # We must first remove the previous instance of image (if exists)
    # in destination domain, if we got the overwrite command
    if force:
        self.log.info("delete image %s on domain %s before overwriting",
                      imgUUID, destDom.sdUUID)
        _deleteImage(destDom, imgUUID, postZero, discard)

    chains = self._createTargetImage(destDom, srcSdUUID, imgUUID)
    self._interImagesCopy(destDom, srcSdUUID, imgUUID, chains)
    self._finalizeDestinationImage(destDom, imgUUID, chains, force)
    if force:
        leafVol = chains['dstChain'][-1]
        # Now we should re-link all deleted hardlinks, if exists
        destDom.templateRelink(imgUUID, leafVol.volUUID)

    # At this point we successfully finished the 'copy' part of the
    # operation and we can clear all recoveries.
    vars.task.clearRecoveries()
    # If it's 'move' operation, we should delete src image after copying
    if op == MOVE_OP:
        # TODO: Should raise here.
        try:
            dom = sdCache.produce(srcSdUUID)
            _deleteImage(dom, imgUUID, postZero, discard)
        except se.StorageException:
            # Best effort: the copy already succeeded, so a failure to
            # remove the source is only logged.
            self.log.warning(
                "Failed to remove img: %s from srcDom %s: "
                "after it was copied to: %s", imgUUID, srcSdUUID,
                dstSdUUID)

    self.log.info("%s task on image %s was successfully finished",
                  OP_TYPES[op], imgUUID)
    return True
def copyFromImage(self, methodArgs, sdUUID, imgUUID, volUUID):
    """Export a volume's data via the imageSharing helpers.

    The image is activated for the duration of the copy and always
    deactivated afterwards, even if the copy fails.
    """
    sdDom = sdCache.produce(sdUUID)
    volume = self._activateVolumeForImportExport(sdDom, imgUUID, volUUID)
    try:
        path = volume.getVolumePath()
        imageSharing.copyFromImage(path, methodArgs)
    finally:
        sdDom.deactivateImage(imgUUID)
def getVmVolumeInfo(self):
    """
    Send info to represent Gluster volume as a network block device

    Returns a dict with type/path/protocol/hosts suitable for a libvirt
    network disk definition using the gluster protocol.
    """
    rpath = sdCache.produce(self.sdUUID).getRealPath()
    # Real path is "<server>:<volume>"; split from the right so IPv6-ish
    # server strings containing ':' are not broken.
    volfileServer, volname = rpath.rsplit(":", 1)
    volname = volname.strip('/')

    # Extract the volume's transport using gluster cli
    svdsmProxy = svdsm.getProxy()

    try:
        res = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
    except GlusterException:
        # In case of issues with finding transport type, default to tcp
        self.log.warning(
            "Unable to find transport type for GlusterFS"
            " volume %s. GlusterFS server = %s."
            "Defaulting to tcp", volname, volfileServer, exc_info=True)
        transport = VOLUME_TRANS_MAP['TCP']
        brickServers = []
    else:
        vol_info = res[volname]
        transport = VOLUME_TRANS_MAP[vol_info['transportType'][0]]
        # Brick entries are "<server>:<brick-path>"; keep only servers.
        brickServers = utils.unique(
            brick.split(":", 1)[0] for brick in vol_info['bricks'])
        # remove server passed as argument from backup servers to avoid
        # duplicates
        if volfileServer in brickServers:
            brickServers.remove(volfileServer)

    # gfapi does not use brick ports, it uses the glusterd port (24007)
    # from the hosts passed to fetch the volume information.
    # If 0 is passed, gfapi defaults to 24007.
    volPort = "0"

    imgFilePath = self.getVolumePath()
    imgFilePath_list = imgFilePath.rsplit("/")
    # Extract path to the image, relative to the gluster mount
    imgFileRelPath = "/".join(imgFilePath_list[-4:])
    glusterPath = volname + '/' + imgFileRelPath

    # Primary server first, then the remaining brick servers as backups.
    hosts = [dict(name=volfileServer, port=volPort, transport=transport)]
    hosts.extend(dict(name=brickServer, port=volPort, transport=transport)
                 for brickServer in brickServers)

    return {
        'type': 'network',
        'path': glusterPath,
        'protocol': 'gluster',
        'hosts': hosts,
    }
def _getSparsifyVolume(self, sdUUID, imgUUID, volUUID):
    """Return a volume object for sparsify operations.

    FIXME: sdCache.produce(...).produceVolume(...) yields volumes whose
    getVolumePath() contains a colon (:) for NFS servers, so the volume
    class is instantiated directly instead.
    https://bugzilla.redhat.com/1128942
    If, and when the bug gets solved, switch back to
    sdCache.produce(...).produceVolume(...).
    """
    domain = sdCache.produce(sdUUID)
    cls = domain.getVolumeClass()
    return cls(self.repoPath, sdUUID, imgUUID, volUUID)
def copyToImage(self, methodArgs, sdUUID, imgUUID, volUUID=None):
    """Import data into a volume via the imageSharing helpers.

    The target volume is extended (when relevant) to the incoming image
    size before copying; the image is always deactivated afterwards.
    """
    sdDom = sdCache.produce(sdUUID)
    volume = self._activateVolumeForImportExport(sdDom, imgUUID, volUUID)
    try:
        # Extend the volume (if relevant) to the image size
        length = imageSharing.getLengthFromArgs(methodArgs)
        volume.extend(length)
        imageSharing.copyToImage(volume.getVolumePath(), methodArgs)
    finally:
        sdDom.deactivateImage(imgUUID)
def upload(self, methodArgs, sdUUID, imgUUID, volUUID=None):
    """Upload the volume contents to Glance.

    The image is activated for the upload and always deactivated
    afterwards, even on failure.
    """
    sdDom = sdCache.produce(sdUUID)
    volume = self._activateVolumeForImportExport(sdDom, imgUUID, volUUID)
    try:
        self._check_sharing_method(methodArgs)
        headers = methodArgs.get("headers")
        glance.upload_image(volume.getVolumePath(), methodArgs["url"],
                            headers=headers)
    finally:
        sdDom.deactivateImage(imgUUID)
def download(self, methodArgs, sdUUID, imgUUID, volUUID=None):
    """Download an image from Glance into the given volume.

    The target volume is extended (when relevant) to the remote image
    size before downloading; the image is always deactivated afterwards.
    """
    sdDom = sdCache.produce(sdUUID)
    volume = self._activateVolumeForImportExport(sdDom, imgUUID, volUUID)
    try:
        self._check_sharing_method(methodArgs)
        headers = methodArgs.get("headers")
        # Extend the volume (if relevant) to the image size
        image_info = glance.image_info(methodArgs.get('url'),
                                       headers=headers)
        volume.extend(image_info["size"])
        glance.download_image(volume.getVolumePath(), methodArgs["url"],
                              headers=headers)
    finally:
        sdDom.deactivateImage(imgUUID)
def _extend_base_allocation(base_vol, top_vol):
    """
    Extend the base volume allocation before merging top into base.

    Only relevant for qcow2 volumes on block storage; file-based and raw
    volumes do not need an explicit extend.
    """
    if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT):
        return

    # Worst case: base needs room for everything already in base plus
    # everything in top, plus one free-extension chunk.
    base_alloc = base_vol.getVolumeSize()
    top_alloc = top_vol.getVolumeSize()
    vol_chunk_size = config.getint('irs', 'volume_utilization_chunk_mb') * MiB
    potential_alloc = base_alloc + top_alloc + vol_chunk_size
    # TODO: add chunk_size only if top is leaf.

    # Never allocate more than the capacity plus qcow2 metadata overhead.
    capacity = base_vol.getCapacity()
    max_alloc = utils.round(capacity * sc.COW_OVERHEAD, MiB)
    actual_alloc = min(potential_alloc, max_alloc)
    actual_alloc = utils.round(actual_alloc, MiB)
    # extendVolume() takes the new size in MiB.
    actual_alloc_mb = actual_alloc // MiB
    dom = sdCache.produce(base_vol.sdUUID)
    dom.extendVolume(base_vol.volUUID, actual_alloc_mb)
def _extend_base_allocation(base_vol, top_vol):
    """
    Extend the base volume allocation before merging top into base.

    Only relevant for qcow2 volumes on block storage; file-based and raw
    volumes do not need an explicit extend.
    """
    if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT):
        return

    # Worst case: base needs room for everything already in base plus
    # everything in top, plus one free-extension chunk.
    base_alloc = base_vol.getVolumeSize(bs=1)
    top_alloc = top_vol.getVolumeSize(bs=1)
    vol_chunk_size = (config.getint('irs', 'volume_utilization_chunk_mb') *
                      constants.MEGAB)
    potential_alloc = base_alloc + top_alloc + vol_chunk_size
    # TODO: add chunk_size only if top is leaf.

    # Never allocate more than the capacity plus qcow2 metadata overhead.
    capacity = base_vol.getSize() * sc.BLOCK_SIZE
    max_alloc = utils.round(capacity * sc.COW_OVERHEAD, constants.MEGAB)
    actual_alloc = min(potential_alloc, max_alloc)
    # Fix: ceil-divide with "//" - the original used "/", which under
    # Python 3 is true division and passes a float MB count to
    # extendVolume().
    actual_alloc_mb = (actual_alloc + constants.MEGAB - 1) // constants.MEGAB
    dom = sdCache.produce(base_vol.sdUUID)
    dom.extendVolume(base_vol.volUUID, actual_alloc_mb)
def _extend_base_allocation(base_vol, top_vol):
    """
    Extend the base volume allocation before merging top into base.

    Only relevant for qcow2 volumes on block storage; file-based and raw
    volumes do not need an explicit extend.
    """
    if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT):
        return

    # Worst case: base needs room for everything already in base plus
    # everything in top, plus one free-extension chunk.
    base_alloc = base_vol.getVolumeSize(bs=1)
    top_alloc = top_vol.getVolumeSize(bs=1)
    vol_chunk_size = (config.getint('irs', 'volume_utilization_chunk_mb') *
                      constants.MEGAB)
    potential_alloc = base_alloc + top_alloc + vol_chunk_size
    # TODO: add chunk_size only if top is leaf.

    # Never allocate more than the capacity plus qcow2 metadata overhead.
    capacity = base_vol.getSizeBlk() * sc.BLOCK_SIZE
    max_alloc = utils.round(capacity * sc.COW_OVERHEAD, constants.MEGAB)
    actual_alloc = min(potential_alloc, max_alloc)
    # Fix: ceil-divide with "//" - the original used "/", which under
    # Python 3 is true division and passes a float MB count to
    # extendVolume().
    actual_alloc_mb = (actual_alloc + constants.MEGAB - 1) // constants.MEGAB
    dom = sdCache.produce(base_vol.sdUUID)
    dom.extendVolume(base_vol.volUUID, actual_alloc_mb)
def _share(self, dstImgPath):
    """
    Share this volume to dstImgPath, including the metadata and the lease

    Creates hard links for the volume data file and its metadata file
    under dstImgPath, and shares the sanlock lease when the domain uses
    volume leases.
    """
    dstVolPath = os.path.join(dstImgPath, self.volUUID)
    dstMetaPath = self.getMetaVolumePath(dstVolPath)

    self.log.debug("Share volume %s to %s", self.volUUID, dstImgPath)
    # Hard link the volume data file into the destination image dir.
    self.oop.utils.forceLink(self.getVolumePath(), dstVolPath)

    self.log.debug("Share volume metadata of %s to %s", self.volUUID,
                   dstImgPath)
    # Hard link the metadata file alongside it.
    self.oop.utils.forceLink(self.getMetaVolumePath(), dstMetaPath)

    # Link the lease file if the domain uses sanlock
    if sdCache.produce(self.sdUUID).hasVolumeLeases():
        self._shareLease(dstImgPath)
def _share(self, dstImgPath):
    """
    Share this volume to dstImgPath, including the metadata and the lease
    """
    dstVolPath = os.path.join(dstImgPath, self.volUUID)

    self.log.debug("Share volume %s to %s", self.volUUID, dstImgPath)
    self.oop.utils.forceLink(self.getVolumePath(), dstVolPath)

    self.log.debug("Share volume metadata of %s to %s", self.volUUID,
                   dstImgPath)
    dstMetaPath = self.getMetaVolumePath(dstVolPath)
    self.oop.utils.forceLink(self.getMetaVolumePath(), dstMetaPath)

    # Domains using sanlock volume leases need the lease shared as well.
    if sdCache.produce(self.sdUUID).hasVolumeLeases():
        self._shareLease(dstImgPath)
def createFakeTemplate(self, sdUUID, volParams):
    """
    Create fake template (relevant for Backup domain only)

    If a template volume described by volParams does not exist (or its
    path is broken) in domain sdUUID, create a sparse COW placeholder
    marked FAKE and shared, then re-link dependent images to it.
    Failures are logged but never raised - this is best effort.
    """
    with self._fakeTemplateLock:
        try:
            destDom = sdCache.produce(sdUUID)
            volclass = destDom.getVolumeClass()
            # Validate that the destination template exists and accessible
            volclass(self.repoPath, sdUUID, volParams['imgUUID'],
                     volParams['volUUID'])
        except (se.VolumeDoesNotExist, se.ImagePathError):
            # Template is missing - create the fake placeholder.
            try:
                # Create fake parent volume
                destDom.createVolume(imgUUID=volParams['imgUUID'],
                                     capacity=volParams['capacity'],
                                     volFormat=sc.COW_FORMAT,
                                     preallocate=sc.SPARSE_VOL,
                                     diskType=volParams['disktype'],
                                     volUUID=volParams['volUUID'],
                                     desc="Fake volume",
                                     srcImgUUID=sc.BLANK_UUID,
                                     srcVolUUID=sc.BLANK_UUID)

                vol = destDom.produceVolume(imgUUID=volParams['imgUUID'],
                                            volUUID=volParams['volUUID'])
                # Mark fake volume as "FAKE"
                vol.setLegality(sc.FAKE_VOL)
                # Mark fake volume as shared
                vol.setShared()
                # Now we should re-link all hardlinks of this template in
                # all VMs based on it
                destDom.templateRelink(volParams['imgUUID'],
                                       volParams['volUUID'])

                self.log.debug("Succeeded to create fake image %s in "
                               "domain %s", volParams['imgUUID'],
                               destDom.sdUUID)
            except Exception:
                self.log.error("Failure to create fake image %s in domain "
                               "%s", volParams['imgUUID'], destDom.sdUUID,
                               exc_info=True)
def isLegal(self, sdUUID, imgUUID):
    """
    Check correctness of the whole chain (excluding template)

    Returns False when any volume of the image is ILLEGAL or FAKE, or
    when the chain cannot be inspected at all.
    """
    try:
        volclass = sdCache.produce(sdUUID).getVolumeClass()
        vollist = volclass.getImageVolumes(sdUUID, imgUUID)
        self.log.info("image %s in domain %s has vollist %s", imgUUID,
                      sdUUID, str(vollist))
        for v in vollist:
            vol = volclass(self.repoPath, sdUUID, imgUUID, v)
            if not vol.isLegal() or vol.isFake():
                return False
    except Exception:
        # Fix: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Any storage error still means "not legal".
        return False
    return True
def _update_base_capacity(base_vol, top_vol):
    """Grow the base volume capacity to match the top volume, if needed."""
    new_capacity = top_vol.getCapacity()
    # TODO: raise if top < base raise some impossible state error.
    if new_capacity <= base_vol.getCapacity():
        return

    if base_vol.getFormat() != sc.RAW_FORMAT:
        log.info("Updating base capacity, setting size in metadata to "
                 "%d for cow base volume", new_capacity)
        base_vol.setCapacity(new_capacity)
        return

    log.info("Updating base capacity, extending size of raw base "
             "volume to %d", new_capacity)
    # extendSize can run on only SPM so only StorageDomain implement it.
    domain = sdCache.produce(base_vol.sdUUID)
    volume = domain.produceVolume(base_vol.imgUUID, base_vol.volUUID)
    volume.extendSize(new_capacity)
def syncData(self, sdUUID, imgUUID, dstSdUUID, syncType):
    """
    Synchronize image data between two storage domains.

    syncType selects which part of the chain is synced:
    SYNC_VOLUMES_INTERNAL - everything but the leaves,
    SYNC_VOLUMES_LEAF - only the leaves,
    SYNC_VOLUMES_ALL - the whole chain.
    Any other value raises MiscNotImplementedException.
    """
    srcChain = self.getChain(sdUUID, imgUUID)
    log_str = logutils.volume_chain_to_str(vol.volUUID for vol in srcChain)
    self.log.info("Source chain=%s ", log_str)

    dstChain = self.getChain(dstSdUUID, imgUUID)
    log_str = logutils.volume_chain_to_str(vol.volUUID for vol in dstChain)
    self.log.info("Dest chain=%s ", log_str)

    if syncType == SYNC_VOLUMES_INTERNAL:
        try:
            # Removing the leaf volumes
            del srcChain[-1], dstChain[-1]
        except IndexError:
            raise se.ImageIsNotLegalChain()
    elif syncType == SYNC_VOLUMES_LEAF:
        try:
            # Removing all the internal volumes
            del srcChain[:-1], dstChain[:-1]
        except IndexError:
            raise se.ImageIsNotLegalChain()
    elif syncType != SYNC_VOLUMES_ALL:
        raise se.MiscNotImplementedException()

    # Both chains must describe the same structure before copying.
    if len(srcChain) != len(dstChain):
        raise se.DestImageActionError(imgUUID, dstSdUUID)

    # Checking the volume uuids (after removing the leaves to allow
    # different uuids for the current top layer, see previous check).
    for i, v in enumerate(srcChain):
        if v.volUUID != dstChain[i].volUUID:
            raise se.DestImageActionError(imgUUID, dstSdUUID)

    dstDom = sdCache.produce(dstSdUUID)

    self._interImagesCopy(dstDom, sdUUID, imgUUID,
                          {'srcChain': srcChain, 'dstChain': dstChain})
    self._finalizeDestinationImage(dstDom, imgUUID,
                                   {'srcChain': srcChain,
                                    'dstChain': dstChain}, False)
def _update_base_capacity(base_vol, top_vol):
    """
    Grow the base volume capacity to match the top volume, if needed.

    NOTE(review): getSize()/setSize()/extendSize() units are not visible
    here (blocks vs bytes) - presumably consistent across the three
    calls; verify against the Volume API.
    """
    top_size = top_vol.getSize()
    base_size = base_vol.getSize()
    # TODO: raise if top < base raise some impossible state error.
    if top_size <= base_size:
        return

    if base_vol.getFormat() == sc.RAW_FORMAT:
        log.info("Updating base capacity, extending size of raw base "
                 "volume to %d", top_size)
        # extendSize can run on only SPM so only StorageDomain implement it.
        dom = sdCache.produce(base_vol.sdUUID)
        vol = dom.produceVolume(base_vol.imgUUID, base_vol.volUUID)
        vol.extendSize(top_size)
    else:
        # COW volumes only need their metadata updated.
        log.info("Updating base capacity, setting size in metadata to "
                 "%d for cow base volume", top_size)
        base_vol.setSize(top_size)
def reconcileVolumeChain(self, sdUUID, imgUUID, leafVolUUID): """ Discover and return the actual volume chain of an offline image according to the qemu-img info command and synchronize volume metadata. """ # Prepare volumes dom = sdCache.produce(sdUUID) allVols = dom.getAllVolumes() imgVolumes = sd.getVolsOfImage(allVols, imgUUID).keys() dom.activateVolumes(imgUUID, imgVolumes) # Walk the volume chain using qemu-img. Not safe for running VMs actualVolumes = [] volUUID = leafVolUUID while volUUID is not None: actualVolumes.insert(0, volUUID) vol = dom.produceVolume(imgUUID, volUUID) qemuImgFormat = sc.fmt2str(vol.getFormat()) imgInfo = qemuimg.info(vol.volumePath, qemuImgFormat) backingFile = imgInfo.get('backingfile') if backingFile is not None: volUUID = os.path.basename(backingFile) else: volUUID = None # A merge of the active layer has copy and pivot phases. # During copy, data is copied from the leaf into its parent. Writes # are mirrored to both volumes. So even after copying is complete the # volumes will remain consistent. Finally, the VM is pivoted from the # old leaf to the new leaf and mirroring to the old leaf ceases. During # mirroring and before pivoting, we mark the old leaf ILLEGAL so we # know it's safe to delete in case the operation is interrupted. vol = dom.produceVolume(imgUUID, leafVolUUID) if vol.getLegality() == sc.ILLEGAL_VOL: actualVolumes.remove(leafVolUUID) # Now that we know the correct volume chain, sync the storge metadata self.syncVolumeChain(sdUUID, imgUUID, actualVolumes[-1], actualVolumes) dom.deactivateImage(imgUUID) return actualVolumes
def __getResourceCandidatesList(self, resourceName, lockType):
    """
    Return list of lock candidates (template and volumes)

    Acquires resource locks for the template (if any) and every volume
    of the image's chain; on any acquisition failure all already-held
    locks are released before re-raising.
    """
    # Must be imported here due to import cycles.
    # TODO: Move getChain to another module so we can use normal import.
    import vdsm.storage.image as image

    volResourcesList = []
    template = None
    dom = sdCache.produce(sdUUID=self.sdUUID)
    # Get the list of the volumes
    repoPath = os.path.join(sc.REPO_DATA_CENTER, dom.getPools()[0])
    try:
        chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                               imgUUID=resourceName)
    except se.ImageDoesNotExistInSD:
        log.debug("Image %s does not exist in domain %s", resourceName,
                  self.sdUUID)
        return []

    # check if the chain is built above a template, or it is a standalone
    pvol = chain[0].getParentVolume()
    if pvol:
        template = pvol.volUUID
    elif chain[0].isShared():
        # Image of template itself,
        # with no other volumes in chain
        template = chain[0].volUUID
        del chain[:]

    volUUIDChain = [vol.volUUID for vol in chain]
    volUUIDChain.sort()

    # Activate all volumes in chain at once.
    # We will attempt to activate all volumes again down to the flow with
    # no consequence, since they are already active.
    # TODO Fix resource framework to hold images, instead of specific vols.
    # This assumes that chains can not spread into more than one SD.
    if dom.__class__.__name__ == "BlockStorageDomain":
        lvm.activateLVs(self.sdUUID, volUUIDChain)

    failed = False
    # Acquire template locks:
    # - 'lockType' for template's image itself
    # - Always 'shared' lock for image based on template
    try:
        if template:
            if len(volUUIDChain) > 0:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, rm.SHARED,
                    timeout=self.resource_default_timeout)
            else:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, lockType,
                    timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)

        # Acquire 'lockType' volume locks
        for volUUID in volUUIDChain:
            volRes = rm.acquireResource(
                self.volumeResourcesNamespace, volUUID, lockType,
                timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)
    except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
        log.debug("Cannot acquire volume resource (%s)", str(e))
        failed = True
        raise
    except Exception:
        log.debug("Cannot acquire volume resource", exc_info=True)
        failed = True
        raise
    finally:
        if failed:
            # Release already acquired template/volumes locks
            for volRes in volResourcesList:
                volRes.release()

    return volResourcesList
def _setupDomain(self):
    """Produce our storage domain, run its setup, and keep a reference."""
    log.debug("Producing domain %s", self.sdUUID)
    produced = sdCache.produce(self.sdUUID)
    produced.setup()
    self.domain = produced
def syncVolumeChain(self, sdUUID, imgUUID, volUUID, actualChain):
    """
    Fix volume metadata to reflect the given actual chain.

    This function is used to correct the volume chain linkage after a live
    merge or for recovery after live merge failure.

    There are multiple cases for the usage of this function:

    1. Marking leaf volume ILLEGAL before we complete a live merge of the
       leaf volume. In this case actual_chain will not contain the leaf
       volume id.

       actual_chain: ["base-vol", "internal-vol"]
       current_chain: ["base-vol", "internal-vol", "leaf-vol"]
       action: mark "leaf-vol" as illegal.

    2. Removal of internal volume after internal live merge was completed
       successfully. In this case actual_chain will not contain one of
       the internal volumes ids.

       actual_chain: ["base-vol", "leaf-vol"]
       current_chain: ["base-vol", "internal-vol", "leaf-vol"]
       action: set "leaf-vol" parent to "base-vol"

    3. Fixing the chain after completing live merge of leaf volume has
       failed. In this case actual_chain will contain all volumes ids.
       This reverts the change done in case 1.

       actual_chain: ["base-vol", "internal-vol", "leaf-vol"]
       current_chain: ["base-vol", "internal-vol", "leaf-vol"]
       action: if "leaf-vol" is ILLEGAL, mark it to LEGAL

    4. Do nothing if actual and current chain matches (no subChain) and
       leaf volume is marked legal. In this case no change needs to be
       done in the current_chain.

       actual_chain: ["base-vol", "internal-vol", "leaf-vol"]
       current_chain: ["base-vol", "internal-vol", "leaf-vol"]
       action: if "leaf-vol" is LEGAL, do nothing
    """
    curChain = self.getChain(sdUUID, imgUUID, volUUID)
    log_str = logutils.volume_chain_to_str(vol.volUUID for vol in curChain)
    self.log.info("Current chain=%s ", log_str)
    sdDom = sdCache.produce(sdUUID)

    # Volumes present in the current chain but missing from the actual
    # chain form the sub-chain to unlink (ordered parent -> child).
    subChain = []
    for vol in curChain:
        if vol.volUUID not in actualChain:
            subChain.insert(0, vol.volUUID)
        elif len(subChain) > 0:
            break
    if len(subChain) == 0:
        tailVol = sdDom.produceVolume(imgUUID, volUUID)
        if not tailVol.isLegal():
            # Case 3 - fixing the chain.
            self.log.info(
                "Leaf volume %s is ILLEGAL but is part of the actual chain"
                " - marking it LEGAL so it can be used again.",
                tailVol.volUUID)
            tailVol.setLegality(sc.LEGAL_VOL)
        # Case 4 - do nothing.
        return

    # Parent of the oldest removed volume; children of the removed
    # sub-chain will be re-linked to it.
    dstParent = sdDom.produceVolume(imgUUID, subChain[0]).getParent()
    subChainTailVol = sdDom.produceVolume(imgUUID, subChain[-1])
    if subChainTailVol.isLeaf():
        # Case 1 - mark leaf ILLEGAL.
        self.log.info(
            "Leaf volume %s is being removed from the actual chain. "
            "Marking it ILLEGAL to prevent data corruption",
            subChainTailVol.volUUID)
        subChainTailVol.setLegality(sc.ILLEGAL_VOL)
    else:
        # Case 2 - remove internal volume.
        for childID in subChainTailVol.getChildren():
            self.log.info(
                "Internal volume %s removed from actual chain, linking "
                "child volume %s to parent volume %s",
                subChainTailVol, childID, dstParent)
            sdDom.produceVolume(imgUUID, childID). \
                setParentMeta(dstParent)
def _produceDomain(self):
    """Look up our storage domain in the cache and store it on self."""
    log.debug("Producing domain %s", self.sdUUID)
    dom = sdCache.produce(self.sdUUID)
    self.domain = dom
def _shrink_base_volume(subchain, optimal_size):
    """Shrink the merged base volume down to *optimal_size* bytes.

    subchain.base_vol is only a VolumeManifest, while reduce() is
    implemented on the Volume class, so a full Volume is produced here.
    """
    optimal_size_blk = optimal_size // sc.BLOCK_SIZE
    domain = sdCache.produce(subchain.sd_id)
    domain.produceVolume(subchain.img_id, subchain.base_id).reduce(
        optimal_size_blk)
def __getResourceCandidatesList(self, resourceName, lockType):
    """
    Return list of lock candidates (template and volumes)

    Acquires resource locks for the template (if any) and every volume
    of the image's chain; on any acquisition failure all already-held
    locks are released before re-raising.
    """
    # Must be imported here due to import cycles.
    # TODO: Move getChain to another module so we can use normal import.
    import vdsm.storage.image as image

    volResourcesList = []
    template = None
    dom = sdCache.produce(sdUUID=self.sdUUID)
    # Get the list of the volumes
    repoPath = os.path.join(sc.REPO_DATA_CENTER, dom.getPools()[0])
    try:
        chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                               imgUUID=resourceName)
    except se.ImageDoesNotExistInSD:
        log.debug("Image %s does not exist in domain %s", resourceName,
                  self.sdUUID)
        return []

    # check if the chain is built above a template, or it is a standalone
    pvol = chain[0].getParentVolume()
    if pvol:
        template = pvol.volUUID
    elif chain[0].isShared():
        # Image of template itself,
        # with no other volumes in chain
        template = chain[0].volUUID
        del chain[:]

    volUUIDChain = [vol.volUUID for vol in chain]
    volUUIDChain.sort()

    # Activate all volumes in chain at once.
    # We will attempt to activate all volumes again down to the flow with
    # no consequence, since they are already active.
    # TODO Fix resource framework to hold images, instead of specific vols.
    # This assumes that chains can not spread into more than one SD.
    if dom.__class__.__name__ == "BlockStorageDomain":
        lvm.activateLVs(self.sdUUID, volUUIDChain)

    failed = False
    # Acquire template locks:
    # - 'lockType' for template's image itself
    # - Always 'shared' lock for image based on template
    try:
        if template:
            if len(volUUIDChain) > 0:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, rm.SHARED,
                    timeout=self.resource_default_timeout)
            else:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, lockType,
                    timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)

        # Acquire 'lockType' volume locks
        for volUUID in volUUIDChain:
            volRes = rm.acquireResource(
                self.volumeResourcesNamespace, volUUID, lockType,
                timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)
    except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
        log.debug("Cannot acquire volume resource (%s)", str(e))
        failed = True
        raise
    except Exception:
        log.debug("Cannot acquire volume resource", exc_info=True)
        failed = True
        raise
    finally:
        if failed:
            # Release already acquired template/volumes locks
            for volRes in volResourcesList:
                volRes.release()

    return volResourcesList
def copyCollapsed(self, sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
                  dstVolUUID, descr, dstSdUUID, volType, volFormat,
                  preallocate, postZero, force, discard):
    """
    Create new template/volume from VM.
    Do it by collapse and copy the whole chain (baseVolUUID->srcVolUUID)

    Returns:
        dstVolUUID on success.
    Raises:
        se.SourceImageActionError, se.CopyImageError, or any
        se.StorageException raised by the underlying operations.
    """
    self.log.info(
        "sdUUID=%s vmUUID=%s srcImgUUID=%s srcVolUUID=%s "
        "dstImgUUID=%s dstVolUUID=%s dstSdUUID=%s volType=%s "
        "volFormat=%s preallocate=%s force=%s postZero=%s "
        "discard=%s", sdUUID, vmUUID, srcImgUUID, srcVolUUID,
        dstImgUUID, dstVolUUID, dstSdUUID, volType,
        sc.type2name(volFormat), sc.type2name(preallocate),
        str(force), str(postZero), discard)
    try:
        srcVol = dstVol = None

        # Find out dest sdUUID
        if dstSdUUID == sd.BLANK_UUID:
            dstSdUUID = sdUUID
        volclass = sdCache.produce(sdUUID).getVolumeClass()
        destDom = sdCache.produce(dstSdUUID)

        # find src volume
        try:
            srcVol = volclass(self.repoPath, sdUUID, srcImgUUID,
                              srcVolUUID)
        except se.StorageException:
            raise
        except Exception as e:
            self.log.error(e, exc_info=True)
            raise se.SourceImageActionError(srcImgUUID, sdUUID, str(e))

        # Create dst volume
        try:
            # Before reading source volume parameters from volume metadata,
            # prepare the volume. This ensure that the volume capacity will
            # match the actual virtual size, see
            # https://bugzilla.redhat.com/1700623.
            srcVol.prepare(rw=False)

            volParams = srcVol.getVolumeParams()

            # Destination format: caller's choice if explicit, otherwise
            # keep the source format.
            if volFormat in [sc.COW_FORMAT, sc.RAW_FORMAT]:
                dstVolFormat = volFormat
            else:
                dstVolFormat = volParams['volFormat']

            # TODO: This is needed only when copying to qcow2-thin volume
            # on block storage. Move into calculate_initial_size.
            dst_vol_allocation = self.calculate_vol_alloc(
                sdUUID, volParams, dstSdUUID, dstVolFormat)

            # Find out dest volume parameters
            if preallocate in [sc.PREALLOCATED_VOL, sc.SPARSE_VOL]:
                volParams['prealloc'] = preallocate

            initial_size = self.calculate_initial_size(
                destDom.supportsSparseness,
                dstVolFormat,
                volParams['prealloc'],
                dst_vol_allocation)

            self.log.info(
                "Copy source %s:%s:%s to destination %s:%s:%s "
                "capacity=%s, initial size=%s", sdUUID, srcImgUUID,
                srcVolUUID, dstSdUUID, dstImgUUID, dstVolUUID,
                volParams['capacity'], initial_size)

            # If image already exists check whether it illegal/fake,
            # overwrite it
            if not self.isLegal(dstSdUUID, dstImgUUID):
                force = True

            # We must first remove the previous instance of image (if
            # exists) in destination domain, if we got the overwrite
            # command
            if force:
                self.log.info(
                    "delete image %s on domain %s before "
                    "overwriting", dstImgUUID, dstSdUUID)
                _deleteImage(destDom, dstImgUUID, postZero, discard)

            destDom.createVolume(imgUUID=dstImgUUID,
                                 capacity=volParams['capacity'],
                                 volFormat=dstVolFormat,
                                 preallocate=volParams['prealloc'],
                                 diskType=volParams['disktype'],
                                 volUUID=dstVolUUID,
                                 desc=descr,
                                 srcImgUUID=sc.BLANK_UUID,
                                 srcVolUUID=sc.BLANK_UUID,
                                 initial_size=initial_size)

            dstVol = sdCache.produce(dstSdUUID).produceVolume(
                imgUUID=dstImgUUID, volUUID=dstVolUUID)

        except se.StorageException:
            self.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            self.log.error("Unexpected error", exc_info=True)
            raise se.CopyImageError("Destination volume %s error: %s" %
                                    (dstVolUUID, str(e)))

        try:
            # Start the actual copy image procedure
            dstVol.prepare(rw=True, setrw=True)

            if (destDom.supportsSparseness and
                    dstVol.getType() == sc.PREALLOCATED_VOL):
                preallocation = qemuimg.PREALLOCATION.FALLOC
            else:
                preallocation = None

            try:
                operation = qemuimg.convert(
                    volParams['path'],
                    dstVol.getVolumePath(),
                    srcFormat=sc.fmt2str(volParams['volFormat']),
                    dstFormat=sc.fmt2str(dstVolFormat),
                    dstQcow2Compat=destDom.qcow2_compat(),
                    preallocation=preallocation,
                    unordered_writes=destDom.recommends_unordered_writes(
                        dstVolFormat),
                    create=not destDom.is_block(),
                )
                with utils.stopwatch("Copy volume %s" % srcVol.volUUID):
                    self._run_qemuimg_operation(operation)
            except ActionStopped:
                # Task was aborted - propagate as-is.
                raise
            except cmdutils.Error as e:
                self.log.exception('conversion failure for volume %s',
                                   srcVol.volUUID)
                raise se.CopyImageError(str(e))

            # Mark volume as SHARED
            if volType == sc.SHARED_VOL:
                dstVol.setShared()

            dstVol.setLegality(sc.LEGAL_VOL)

            if force:
                # Now we should re-link all deleted hardlinks, if exists
                destDom.templateRelink(dstImgUUID, dstVolUUID)
        except se.StorageException:
            self.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            self.log.error("Unexpected error", exc_info=True)
            raise se.CopyImageError("src image=%s, dst image=%s: msg=%s" %
                                    (srcImgUUID, dstImgUUID, str(e)))

        self.log.info("Finished copying %s:%s -> %s:%s", sdUUID,
                      srcVolUUID, dstSdUUID, dstVolUUID)
        # TODO: handle return status
        return dstVolUUID
    finally:
        # Always tear down whatever was prepared, success or failure.
        self.__cleanupCopy(srcVol=srcVol, dstVol=dstVol)
def cloneStructure(self, sdUUID, imgUUID, dstSdUUID):
    """Replicate the image's volume structure onto dstSdUUID."""
    destDom = sdCache.produce(dstSdUUID)
    self._createTargetImage(destDom, sdUUID, imgUUID)
def getChain(self, sdUUID, imgUUID, volUUID=None):
    """
    Return the chain of volumes of image as a sorted list
    (not including a shared base (template) if any)

    Raises:
        se.ImageDoesNotExistInSD if the image has no volumes.
        se.ImageIsNotLegalChain if no leaf exists or the parent links
        form a loop.
    """
    chain = []
    volclass = sdCache.produce(sdUUID).getVolumeClass()

    # Use volUUID when provided
    if volUUID:
        srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)

        # For template images include only one volume (the template itself)
        # NOTE: this relies on the fact that in a template there is only
        # one volume
        if srcVol.isShared():
            return [srcVol]

    # Find all the volumes when volUUID is not provided
    else:
        # Find all volumes of image
        uuidlist = volclass.getImageVolumes(sdUUID, imgUUID)

        if not uuidlist:
            raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)

        srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])

        # For template images include only one volume (the template itself)
        if len(uuidlist) == 1 and srcVol.isShared():
            return [srcVol]

        # Searching for the leaf
        for vol in uuidlist:
            srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)
            if srcVol.isLeaf():
                break
            srcVol = None

        if not srcVol:
            self.log.error("There is no leaf in the image %s", imgUUID)
            raise se.ImageIsNotLegalChain(imgUUID)

    # We have seen corrupted chains that cause endless loops here.
    # https://bugzilla.redhat.com/1125197
    seen = set()

    # Build up the sorted parent -> child chain, walking leaf -> base
    # and prepending, stopping at a shared (template) parent or a blank
    # parent UUID.
    while not srcVol.isShared():
        chain.insert(0, srcVol)
        seen.add(srcVol.volUUID)

        parentUUID = srcVol.getParent()
        if parentUUID == sc.BLANK_UUID:
            break

        if parentUUID in seen:
            self.log.error("Image %s volume %s has invalid parent UUID %s",
                           imgUUID, srcVol.volUUID, parentUUID)
            raise se.ImageIsNotLegalChain(imgUUID)

        srcVol = srcVol.getParentVolume()

    return chain