Beispiel #1
0
 def extendSizeFinalize(cls, taskObj, sdUUID, imgUUID, volUUID):
     """
     Commit a volume size extension by re-syncing the volume metadata
     so it is consistent with the volume's real/virtual size.
     """
     cls.log.debug(
         "finalizing size extension for volume %s on domain %s",
         volUUID, sdUUID)
     # Only a metadata refresh is needed here; the actual resize is done.
     vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
     vol.syncMetadata()
Beispiel #2
0
 def extendSizeFinalize(cls, taskObj, sdUUID, imgUUID, volUUID):
     """
     Finalize a size extension: update the metadata to be consistent
     with the volume's real/virtual size.
     """
     cls.log.debug("finalizing size extension for volume %s on domain %s",
                   volUUID, sdUUID)
     dom = sdCache.produce(sdUUID)
     dom.produceVolume(imgUUID, volUUID).syncMetadata()
Beispiel #3
0
    def prepare(self, sdUUID, imgUUID, volUUID=None):
        """
        Prepare an image for use: build its volume chain (prepending the
        backing template, if any) and activate all volumes in it.

        Returns the chain as a list of volume objects, base first.
        """
        chain = self.getChain(sdUUID, imgUUID, volUUID)

        # If the chain is based on a template, make it the chain head.
        template = chain[0].getParentVolume()
        if template:
            chain.insert(0, template)

        # Activate every volume of the chain in a single call.
        sdCache.produce(sdUUID).activateVolumes(
            imgUUID, volUUIDs=[vol.volUUID for vol in chain])

        return chain
Beispiel #4
0
    def newVolumeLease(cls, sdUUID, volUUID, leaseSlot):
        """
        Initialize a sanlock resource for the volume lease, when the
        domain supports volume leases.
        """
        dom = sdCache.produce(sdUUID)
        if not dom.hasVolumeLeases():
            return
        leasePath = dom.getLeasesFilePath()
        # Lease offset is derived from the slot, skipping reserved slots.
        offset = (leaseSlot + RESERVED_LEASES) * dom.logBlkSize * sd.LEASE_BLOCKS
        sanlock.init_resource(sdUUID, volUUID, [(leasePath, offset)])
Beispiel #5
0
    def __getResourceCandidatesList(self, resourceName, lockType):
        """
        Return list of lock candidates (template and volumes)

        Builds the volume chain of the image named by resourceName,
        activates the chain volumes on block domains, then acquires a
        lock for each chain member (a 'shared' lock for an underlying
        template when the image has volumes of its own).
        """
        volResourcesList = []
        template = None
        dom = sdCache.produce(sdUUID=self.sdUUID)
        # Get the list of the volumes
        repoPath = os.path.join(self.storage_repository, dom.getPools()[0])
        try:
            chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID, imgUUID=resourceName)
        except se.ImageDoesNotExistInSD:
            log.debug("Image %s does not exist in domain %s", resourceName, self.sdUUID)
            return []

        # check if the chain is built above a template, or it is standalone
        pvol = chain[0].getParentVolume()
        if pvol:
            template = pvol.volUUID
        elif chain[0].isShared():
            # Image of template itself,
            # with no other volumes in chain
            template = chain[0].volUUID
            del chain[:]

        volUUIDChain = [vol.volUUID for vol in chain]
        volUUIDChain.sort()

        # Activate all volumes in chain at once.
        # We will attempt to activate all volumes again down to the flow with
        # no consequence, since they are already active.
        # TODO Fix resource framework to hold images, instead of specific vols.
        # This assumes that chains can not spread into more than one SD.
        if dom.__class__.__name__ == "BlockStorageDomain":
            lvm.activateLVs(self.sdUUID, volUUIDChain)

        failed = False
        # Acquire template locks:
        # - 'lockType' for template's image itself
        # - Always 'shared' lock for image based on template
        try:
            if template:
                if len(volUUIDChain) > 0:
                    volRes = rmanager.acquireResource(self.volumeResourcesNamespace, template, rm.LockType.shared,
                                                      timeout=self.resource_default_timeout)
                else:
                    volRes = rmanager.acquireResource(self.volumeResourcesNamespace, template, lockType,
                                                      timeout=self.resource_default_timeout)
                volResourcesList.append(volRes)

            # Acquire 'lockType' volume locks
            for volUUID in volUUIDChain:
                volRes = rmanager.acquireResource(self.volumeResourcesNamespace, volUUID, lockType,
                                                    timeout=self.resource_default_timeout)

                volResourcesList.append(volRes)
        # Fixed: Python-3-compatible "except ... as e" (was "except ..., e",
        # which is a syntax error on Python 3).
        except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
            log.debug("Cannot acquire volume resource (%s)", str(e))
            failed = True
            raise
Beispiel #6
0
    def rebaseVolumeRollback(cls, taskObj, sdUUID, srcImg, srcVol, dstFormat,
                             srcParent, unsafe):
        """
        Rebase volume rollback

        Undo a rebase: rebase the volume back onto its original parent
        and restore the chain metadata (parent link, leaf status).
        """
        cls.log.info(
            'rebase volume rollback (sdUUID=%s srcImg=%s srcVol=%s '
            'dstFormat=%s srcParent=%s)', sdUUID, srcImg, srcVol, dstFormat,
            srcParent)

        namespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)

        # The whole image is locked exclusively while the chain is rewritten.
        with rm.acquireResource(namespace, srcImg, rm.EXCLUSIVE):
            vol = sdCache.produce(sdUUID).produceVolume(srcImg, srcVol)
            vol.prepare(rw=True, chainrw=True, setrw=True)

            volPath = vol.getVolumePath()
            backingPath = getBackingVolumePath(srcImg, srcParent)

            try:
                qemuimg.rebase(volPath, backingPath,
                               sc.fmt2str(vol.getFormat()),
                               sc.fmt2str(int(dstFormat)),
                               misc.parseBool(unsafe), vars.task.aborting)
                vol.setParent(srcParent)
                vol.recheckIfLeaf()
            except qemuimg.QImgError:
                cls.log.exception(
                    'cannot rollback rebase for volume %s on '
                    '%s', volPath, backingPath)
                raise se.MergeVolumeRollbackError(srcVol)
            finally:
                # Always release the prepared volume, even on failure.
                vol.teardown(sdUUID, srcVol)
Beispiel #7
0
 def clone(self, dstPath, volFormat):
     """
     Clone self volume to the specified dst_image_dir/dst_volUUID
     """
     was_leaf = False
     # Register a recovery first so a failed clone can be rolled back.
     vars.task.pushRecovery(
         task.Recovery("parent volume rollback: " + self.volUUID,
                       "volume", "Volume", "parentVolumeRollback",
                       [self.sdUUID, self.imgUUID, self.volUUID]))
     if self.isLeaf():
         was_leaf = True
         self.setInternal()
     try:
         self.prepare(rw=False)
         self.log.debug('cloning volume %s to %s', self.volumePath, dstPath)
         backing = getBackingVolumePath(self.imgUUID, self.volUUID)
         dom = sdCache.produce(self.sdUUID)
         qemuimg.create(dstPath,
                        backing=backing,
                        format=sc.fmt2str(volFormat),
                        qcow2Compat=dom.qcow2_compat(),
                        backingFormat=sc.fmt2str(self.getFormat()))
         self.teardown(self.sdUUID, self.volUUID)
     except Exception as e:
         self.log.exception('cannot clone image %s volume %s to %s',
                            self.imgUUID, self.volUUID, dstPath)
         # FIXME: might race with other clones
         if was_leaf:
             self.setLeaf()
         self.teardown(self.sdUUID, self.volUUID)
         raise se.CannotCloneVolume(self.volumePath, dstPath, str(e))
Beispiel #8
0
    def __teardownSubChain(self, sdUUID, imgUUID, chain):
        """
        Teardown all volumes in the sub-chain

        The subchain ('ancestor' -> ... -> 'successor') contains the
        volumes that were merged (rebased) into 'successor' and are about
        to be deleted.  They were prepared as part of preparing the whole
        chain before the rebase, but the rebase detached them from the
        chain, so they could not be torn down in the regular flow; their
        resources are released here before deletion.
        """
        if not chain:
            raise se.InvalidParameterException("chain", str(chain))

        volclass = sdCache.produce(sdUUID).getVolumeClass()
        ancestor = chain[0]
        successor = chain[-1]
        srcVol = volclass(self.repoPath, sdUUID, imgUUID, successor)
        dstParent = volclass(self.repoPath, sdUUID, imgUUID,
                             ancestor).getParent()

        # Walk from the successor up to (but not including) the ancestor's
        # parent, tearing each volume down individually.
        while srcVol and dstParent != srcVol.volUUID:
            try:
                self.log.info("Teardown volume %s from image %s",
                              srcVol.volUUID, imgUUID)
                vol = srcVol.getParentVolume()
                srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID,
                                justme=True)
                srcVol = vol
            except Exception:
                self.log.info("Failure to teardown volume %s in subchain %s "
                              "-> %s", srcVol.volUUID, ancestor, successor,
                              exc_info=True)
                # Bug fix: previously the loop retried the same volume on
                # failure, spinning forever when the failure persisted.
                # Teardown is best-effort, so stop at the first failure.
                break
Beispiel #9
0
    def rebaseVolumeRollback(cls, taskObj, sdUUID, srcImg, srcVol, dstFormat, srcParent, unsafe):
        """
        Rebase volume rollback

        Rebases the volume back onto its original parent (srcParent) and
        restores its chain metadata.  Any failure in the qemu rebase or
        in preparing the volume is logged with full context and re-raised.
        """
        cls.log.info("sdUUID=%s srcImg=%s srcVol=%s dstFormat=%s srcParent=%s",
                     sdUUID, srcImg, srcVol, dstFormat, srcParent)

        imageResourcesNamespace = sd.getNamespace(sdUUID, resourceFactories.IMAGE_NAMESPACE)
        # Exclusive image lock: the whole chain is rewritten below.
        with rmanager.acquireResource(imageResourcesNamespace, srcImg, rm.LockType.exclusive):
            try:
                vol = sdCache.produce(sdUUID).produceVolume(imgUUID=srcImg, volUUID=srcVol)
                vol.prepare(rw=True, chainrw=True, setrw=True)
            except Exception:
                cls.log.error("sdUUID=%s srcImg=%s srcVol=%s dstFormat=%s srcParent=%s",
                               sdUUID, srcImg, srcVol, dstFormat, srcParent, exc_info=True)
                raise

            try:
                # Rebase onto the relative path of the original parent.
                (rc, out, err) = qemuRebase(vol.getVolumePath(), vol.getFormat(),
                                            os.path.join('..', srcImg, srcParent),
                                            int(dstFormat), misc.parseBool(unsafe),
                                            vars.task.aborting, False)
                if rc:
                    raise se.MergeVolumeRollbackError(srcVol)

                vol.setParent(srcParent)
                vol.recheckIfLeaf()
            except Exception:
                cls.log.error("sdUUID=%s srcImg=%s srcVol=%s dstFormat=%s srcParent=%s",
                               sdUUID, srcImg, srcVol, dstFormat, srcParent, exc_info=True)
                raise
            finally:
                # The volume was prepared above; always release it.
                vol.teardown(sdUUID, srcVol)
Beispiel #10
0
    def getAllChildrenList(cls, repoPath, sdUUID, imgUUID, pvolUUID):
        """
        Fetch the list of children volumes (across the all images in domain)

        Returns a list of {'imgUUID': ..., 'volUUID': ...} dicts for every
        volume in the domain whose parent is pvolUUID.
        """
        volList = []
        # FIXME!!! We cannot check hardlinks in 'backup' domain, because of possibility of overwriting
        #  'fake' volumes that have hardlinks with 'legal' volumes with same uuid and without hardlinks
        # First, check number of hardlinks
        ## volPath = os.path.join(cls.storage_repository, spUUID, sdUUID, sd.DOMAIN_IMAGES, imgUUID, pvolUUID)
        ## if os.path.exists(volPath):
        ##     if os.stat(volPath).st_nlink == 1:
        ##         return volList
        ## else:
        ##     cls.log.info("Volume %s does not exist", volPath)
        ##     return volList
        # scan whole domain
        pattern = os.path.join(repoPath, sdUUID, sd.DOMAIN_IMAGES, "*",
                               "*.meta")
        files = oop.getProcessPool(sdUUID).glob.glob(pattern)
        # Produce the domain once instead of once per metadata file,
        # and avoid shadowing the imgUUID parameter inside the loop.
        dom = sdCache.produce(sdUUID)
        for metaFile in files:
            volid = os.path.splitext(os.path.basename(metaFile))[0]
            childImgUUID = os.path.basename(os.path.dirname(metaFile))
            if dom.produceVolume(childImgUUID, volid).getParent() == pvolUUID:
                volList.append({'imgUUID': childImgUUID, 'volUUID': volid})

        return volList
Beispiel #11
0
    def getAllChildrenList(cls, repoPath, sdUUID, imgUUID, pvolUUID):
        """
        Fetch the list of children volumes (across the all images in domain)

        Returns a list of {'imgUUID': ..., 'volUUID': ...} dicts for every
        volume in the domain whose parent is pvolUUID.
        """
        volList = []
        # FIXME!!! We cannot check hardlinks in 'backup' domain, because of possibility of overwriting
        #  'fake' volumes that have hardlinks with 'legal' volumes with same uuid and without hardlinks
        # First, check number of hardlinks
     ## volPath = os.path.join(cls.storage_repository, spUUID, sdUUID, sd.DOMAIN_IMAGES, imgUUID, pvolUUID)
     ## if os.path.exists(volPath):
     ##     if os.stat(volPath).st_nlink == 1:
     ##         return volList
     ## else:
     ##     cls.log.info("Volume %s does not exist", volPath)
     ##     return volList
        # scan whole domain
        pattern = os.path.join(repoPath, sdUUID, sd.DOMAIN_IMAGES, "*", "*.meta")
        files = oop.getProcessPool(sdUUID).glob.glob(pattern)
        # Produce the domain once instead of once per metadata file,
        # and avoid shadowing the imgUUID parameter inside the loop.
        dom = sdCache.produce(sdUUID)
        for metaFile in files:
            volid = os.path.splitext(os.path.basename(metaFile))[0]
            childImgUUID = os.path.basename(os.path.dirname(metaFile))
            if dom.produceVolume(childImgUUID, volid).getParent() == pvolUUID:
                volList.append({'imgUUID': childImgUUID, 'volUUID': volid})

        return volList
Beispiel #12
0
    def rebaseVolumeRollback(cls, taskObj, sdUUID, srcImg,
                             srcVol, dstFormat, srcParent, unsafe):
        """
        Rebase volume rollback

        Rebase the volume back onto its original parent and restore its
        chain metadata (parent link, leaf status).
        """
        cls.log.info('rebase volume rollback (sdUUID=%s srcImg=%s srcVol=%s '
                     'dstFormat=%s srcParent=%s)', sdUUID, srcImg, srcVol,
                     dstFormat, srcParent)

        namespace = sd.getNamespace(sdUUID, resourceFactories.IMAGE_NAMESPACE)

        # The whole image is locked exclusively while the chain is rewritten.
        with rmanager.acquireResource(namespace, srcImg,
                                      rm.LockType.exclusive):
            vol = sdCache.produce(sdUUID).produceVolume(srcImg, srcVol)
            vol.prepare(rw=True, chainrw=True, setrw=True)

            path = vol.getVolumePath()
            backingPath = getBackingVolumePath(srcImg, srcParent)

            try:
                qemuimg.rebase(path, backingPath,
                               fmt2str(vol.getFormat()),
                               fmt2str(int(dstFormat)),
                               misc.parseBool(unsafe), vars.task.aborting)
                vol.setParent(srcParent)
                vol.recheckIfLeaf()
            except qemuimg.QImgError:
                cls.log.exception('cannot rollback rebase for volume %s on '
                                  '%s', path, backingPath)
                raise se.MergeVolumeRollbackError(srcVol)
            finally:
                # Always release the prepared volume, even on failure.
                vol.teardown(sdUUID, srcVol)
Beispiel #13
0
    def syncData(self, sdUUID, imgUUID, dstSdUUID, syncType):
        """
        Synchronize the volumes of an image between two storage domains.

        syncType selects which part of the chains is synced:
        SYNC_VOLUMES_INTERNAL - everything except the leaf volumes,
        SYNC_VOLUMES_LEAF     - the leaf volumes only,
        SYNC_VOLUMES_ALL      - the whole chains.

        Raises se.ImageIsNotLegalChain for empty chains,
        se.DestImageActionError when chains differ in length or UUIDs,
        and se.NotImplementedException for an unknown syncType.
        """
        srcChain = self.getChain(sdUUID, imgUUID)
        dstChain = self.getChain(dstSdUUID, imgUUID)

        if syncType == SYNC_VOLUMES_INTERNAL:
            try:
                # Removing the leaf volumes
                del srcChain[-1], dstChain[-1]
            except IndexError:
                raise se.ImageIsNotLegalChain()
        elif syncType == SYNC_VOLUMES_LEAF:
            try:
                # Removing all the internal volumes
                del srcChain[:-1], dstChain[:-1]
            except IndexError:
                raise se.ImageIsNotLegalChain()
        elif syncType != SYNC_VOLUMES_ALL:
            raise se.NotImplementedException()

        if len(srcChain) != len(dstChain):
            raise se.DestImageActionError(imgUUID, dstSdUUID)

        # Checking the volume uuids (after removing the leaves to allow
        # different uuids for the current top layer, see previous check).
        for i, v in enumerate(srcChain):
            if v.volUUID != dstChain[i].volUUID:
                raise se.DestImageActionError(imgUUID, dstSdUUID)

        dstDom = sdCache.produce(dstSdUUID)

        # Copy the selected volumes and finalize the destination image
        # (no force overwrite on finalize).
        self._interImagesCopy(dstDom, sdUUID, imgUUID,
                              {'srcChain': srcChain, 'dstChain': dstChain})
        self._finalizeDestinationImage(dstDom, imgUUID,
                                       {'srcChain': srcChain,
                                        'dstChain': dstChain}, False)
Beispiel #14
0
 def clone(self, dstPath, volFormat):
     """
     Clone self volume to the specified dst_image_dir/dst_volUUID
     """
     wasleaf = False
     taskName = "parent volume rollback: " + self.volUUID
     # Register the rollback first, before anything is modified.
     recovery = task.Recovery(taskName, "volume", "Volume",
                              "parentVolumeRollback",
                              [self.sdUUID, self.imgUUID, self.volUUID])
     vars.task.pushRecovery(recovery)
     if self.isLeaf():
         wasleaf = True
         self.setInternal()
     try:
         self.prepare(rw=False)
         self.log.debug('cloning volume %s to %s', self.volumePath,
                        dstPath)
         backingPath = getBackingVolumePath(self.imgUUID, self.volUUID)
         sdom = sdCache.produce(self.sdUUID)
         qemuimg.create(dstPath, backing=backingPath,
                        format=sc.fmt2str(volFormat),
                        qcow2Compat=sdom.qcow2_compat(),
                        backingFormat=sc.fmt2str(self.getFormat()))
         self.teardown(self.sdUUID, self.volUUID)
     except Exception as e:
         self.log.exception('cannot clone image %s volume %s to %s',
                            self.imgUUID, self.volUUID, dstPath)
         # FIXME: might race with other clones
         if wasleaf:
             self.setLeaf()
         self.teardown(self.sdUUID, self.volUUID)
         raise se.CannotCloneVolume(self.volumePath, dstPath, str(e))
Beispiel #15
0
    def upload(self, methodArgs, sdUUID, imgUUID, volUUID=None):
        """
        Upload the volume content, deactivating the image afterwards.
        """
        dom = sdCache.produce(sdUUID)
        vol = self._activateVolumeForImportExport(dom, imgUUID, volUUID)
        try:
            imageSharing.upload(vol.getVolumePath(), methodArgs)
        finally:
            # Always undo the activation done above.
            dom.deactivateImage(imgUUID)
Beispiel #16
0
 def getParentVolume(self):
     """
     Return parent volume object
     """
     puuid = self.getParent()
     # A missing or blank parent UUID means this volume has no parent.
     if not puuid or puuid == BLANK_UUID:
         return None
     return sdCache.produce(self.sdUUID).produceVolume(self.imgUUID, puuid)
Beispiel #17
0
    def create(
        cls, repoPath, sdUUID, imgUUID, size, volFormat, preallocate, diskType, volUUID, desc, srcImgUUID, srcVolUUID
    ):
        """
        Create a new volume with given size or snapshot
            'size' - in sectors
            'volFormat' - volume format COW / RAW
            'preallocate' - Preallocate / Sparse
            'diskType' - string that describes disk type System|Data|Shared|Swap|Temp
            'srcImgUUID' - source image UUID
            'srcVolUUID' - source volume UUID
        """
        if not volUUID:
            volUUID = str(uuid.uuid4())
        if volUUID == volume.BLANK_UUID:
            raise se.InvalidParameterException("volUUID", volUUID)

        # Validate volume parameters should be checked here for all
        # internal flows using volume creation.
        cls.validateCreateVolumeParams(volFormat, preallocate, srcVolUUID)

        mysd = sdCache.produce(sdUUID=sdUUID)
        try:
            lvm.getLV(sdUUID, volUUID)
        except se.LogicalVolumeDoesNotExistError:
            pass  # OK, this is a new volume
        else:
            raise se.VolumeAlreadyExists(volUUID)

        imageDir = image.Image(repoPath).create(sdUUID, imgUUID)
        vol_path = os.path.join(imageDir, volUUID)
        pvol = None
        voltype = "LEAF"

        try:
            if srcVolUUID != volume.BLANK_UUID:
                # We have a parent
                if srcImgUUID == volume.BLANK_UUID:
                    srcImgUUID = imgUUID
                pvol = BlockVolume(repoPath, sdUUID, srcImgUUID, srcVolUUID)
                # Cannot create snapshot for ILLEGAL volume
                if not pvol.isLegal():
                    raise se.createIllegalVolumeSnapshotError(pvol.volUUID)

                if imgUUID != srcImgUUID:
                    pvol.share(imageDir, hard=False)
                    pvol = BlockVolume(repoPath, sdUUID, imgUUID, srcVolUUID)

                # override size param by parent's size
                size = pvol.getSize()
        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        # Fixed: Python-3-compatible "except ... as e" (was "except ..., e").
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCannotGetParent(
                "blockVolume can't get parent %s for volume %s: %s" % (srcVolUUID, volUUID, str(e))
            )
Beispiel #18
0
    def create(cls, repoPath, sdUUID, imgUUID, size, volFormat, preallocate,
               diskType, volUUID, desc, srcImgUUID, srcVolUUID):
        """
        Create a new volume with given size or snapshot
            'size' - in sectors
            'volFormat' - volume format COW / RAW
            'preallocate' - Preallocate / Sparse
            'diskType' - string that describes disk type System|Data|Shared|Swap|Temp
            'srcImgUUID' - source image UUID
            'srcVolUUID' - source volume UUID
        """
        if not volUUID:
            volUUID = str(uuid.uuid4())
        if volUUID == volume.BLANK_UUID:
            raise se.InvalidParameterException("volUUID", volUUID)

        # Validate volume parameters should be checked here for all
        # internal flows using volume creation.
        cls.validateCreateVolumeParams(volFormat, preallocate, srcVolUUID)

        mysd = sdCache.produce(sdUUID=sdUUID)
        try:
            lvm.getLV(sdUUID, volUUID)
        except se.LogicalVolumeDoesNotExistError:
            pass  # OK, this is a new volume
        else:
            raise se.VolumeAlreadyExists(volUUID)

        imageDir = image.Image(repoPath).create(sdUUID, imgUUID)
        vol_path = os.path.join(imageDir, volUUID)
        pvol = None
        voltype = "LEAF"

        try:
            if srcVolUUID != volume.BLANK_UUID:
                # We have a parent
                if srcImgUUID == volume.BLANK_UUID:
                    srcImgUUID = imgUUID
                pvol = BlockVolume(repoPath, sdUUID, srcImgUUID, srcVolUUID)
                # Cannot create snapshot for ILLEGAL volume
                if not pvol.isLegal():
                    raise se.createIllegalVolumeSnapshotError(pvol.volUUID)

                if imgUUID != srcImgUUID:
                    pvol.share(imageDir, hard=False)
                    pvol = BlockVolume(repoPath, sdUUID, imgUUID, srcVolUUID)

                # override size param by parent's size
                size = pvol.getSize()
        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        # Fixed: Python-3-compatible "except ... as e" (was "except ..., e").
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCannotGetParent(
                "blockVolume can't get parent %s for volume %s: %s" %
                (srcVolUUID, volUUID, str(e)))
Beispiel #19
0
 def getParentVolume(self):
     """
     Return parent VolumeManifest object
     """
     puuid = self.getParent()
     # A missing or blank parent UUID means there is no parent volume.
     if not puuid or puuid == sc.BLANK_UUID:
         return None
     manifest = sdCache.produce(self.sdUUID).manifest
     return manifest.produceVolume(self.imgUUID, puuid)
Beispiel #20
0
 def getParentVolume(self):
     """
     Return the parent VolumeManifest object, or None when this volume
     has no parent (blank parent UUID).
     """
     parent_uuid = self.getParent()
     if parent_uuid and parent_uuid != sc.BLANK_UUID:
         domain_manifest = sdCache.produce(self.sdUUID).manifest
         return domain_manifest.produceVolume(self.imgUUID, parent_uuid)
     return None
Beispiel #21
0
 def getVolumeSize(self, bs=512):
     """
     Return the volume size in blocks
     """
     # Delegate to the class method getVSize(); producing the SD object
     # costs a little, but avoids duplicating the sizing logic here.
     domain = sdCache.produce(sdUUID=self.sdUUID)
     return self.getVSize(domain, self.imgUUID, self.volUUID, bs)
Beispiel #22
0
 def getVolumeSize(self, bs=512):
     """
     Return the volume size, in blocks of 'bs' bytes.
     """
     # getVSize() already does what we need; the overhead of producing
     # the SD object is a small price for code de-duplication.
     return self.getVSize(sdCache.produce(sdUUID=self.sdUUID),
                          self.imgUUID, self.volUUID, bs)
Beispiel #23
0
 def getVolumeSize(self, bs=BLOCK_SIZE):
     """
     Return the volume size in blocks
     """
     # The SD manifest's getVSize() provides the size; convert it to
     # block units of 'bs'.
     manifest = sdCache.produce(self.sdUUID).manifest
     raw_size = manifest.getVSize(self.imgUUID, self.volUUID)
     return int(raw_size / bs)
Beispiel #24
0
 def getVolumeSize(self, bs=BLOCK_SIZE):
     """
     Return the volume size, expressed in blocks of 'bs' bytes.
     """
     # Delegate sizing to the SD manifest; producing the object is a
     # small price for code de-duplication.
     sd_manifest = sdCache.produce(self.sdUUID).manifest
     return int(sd_manifest.getVSize(self.imgUUID, self.volUUID) / bs)
Beispiel #25
0
    def move(self, srcSdUUID, dstSdUUID, imgUUID, vmUUID, op, postZero, force):
        """
        Move/Copy image between storage domains within same storage pool

        'op' selects move vs. copy (see OP_TYPES / MOVE_OP); on a move the
        source image is deleted after a successful copy (best-effort).
        'force' (or an illegal/fake destination image) makes the
        destination image be deleted and overwritten.
        Returns True on success.
        """
        self.log.info("srcSdUUID=%s dstSdUUID=%s imgUUID=%s vmUUID=%s op=%s "
                      "force=%s postZero=%s", srcSdUUID, dstSdUUID, imgUUID,
                      vmUUID, OP_TYPES[op], str(force), str(postZero))

        destDom = sdCache.produce(dstSdUUID)
        # If image already exists check whether it illegal/fake, overwrite it
        if not self.isLegal(destDom.sdUUID, imgUUID):
            force = True
        # We must first remove the previous instance of image (if exists)
        # in destination domain, if we got the overwrite command
        if force:
            self.log.info("delete image %s on domain %s before overwriting",
                          imgUUID, destDom.sdUUID)
            _deleteImage(destDom, imgUUID, postZero)

        # Create the target image layout, copy the data, then finalize it.
        chains = self._createTargetImage(destDom, srcSdUUID, imgUUID)
        self._interImagesCopy(destDom, srcSdUUID, imgUUID, chains)
        self._finalizeDestinationImage(destDom, imgUUID, chains, force)
        if force:
            leafVol = chains['dstChain'][-1]
            # Now we should re-link all deleted hardlinks, if exists
            destDom.templateRelink(imgUUID, leafVol.volUUID)

        # At this point we successfully finished the 'copy' part of the
        # operation and we can clear all recoveries.
        vars.task.clearRecoveries()
        # If it's 'move' operation, we should delete src image after copying
        if op == MOVE_OP:
            # TODO: Should raise here.
            try:
                dom = sdCache.produce(srcSdUUID)
                _deleteImage(dom, imgUUID, postZero)
            except se.StorageException:
                self.log.warning("Failed to remove img: %s from srcDom %s: "
                                 "after it was copied to: %s", imgUUID,
                                 srcSdUUID, dstSdUUID)

        self.log.info("%s task on image %s was successfully finished",
                      OP_TYPES[op], imgUUID)
        return True
Beispiel #26
0
    def getChain(self, sdUUID, imgUUID, volUUID=None):
        """
        Return the chain of volumes of image as a sorted list
        (not including a shared base (template) if any)

        When volUUID is given it is used as the chain tip; otherwise the
        image's leaf volume is located and used.  The returned list is
        ordered parent -> child.  Raises se.ImageDoesNotExistInSD when
        the image has no volumes, and se.ImageIsNotLegalChain when no
        leaf volume can be found.
        """
        chain = []
        volclass = sdCache.produce(sdUUID).getVolumeClass()

        # Use volUUID when provided
        if volUUID:
            srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)

            # For template images include only one volume (the template itself)
            # NOTE: this relies on the fact that in a template there is only
            #       one volume
            if srcVol.isShared():
                return [srcVol]

        # Find all the volumes when volUUID is not provided
        else:
            # Find all volumes of image
            uuidlist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)

            if not uuidlist:
                raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)

            srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])

            # For template images include only one volume (the template itself)
            if len(uuidlist) == 1 and srcVol.isShared():
                return [srcVol]

            # Searching for the leaf
            for vol in uuidlist:
                srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)

                if srcVol.isLeaf():
                    break

                srcVol = None

            if not srcVol:
                self.log.error("There is no leaf in the image %s", imgUUID)
                raise se.ImageIsNotLegalChain(imgUUID)

        # Build up the sorted parent -> child chain
        # (walk from the tip towards the base, prepending each volume;
        # stop at a shared volume (template) or a blank parent UUID)
        while not srcVol.isShared():
            chain.insert(0, srcVol)

            if srcVol.getParent() == volume.BLANK_UUID:
                break

            srcVol = srcVol.getParentVolume()

        self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
        return chain
Beispiel #27
0
 def parentVolumeRollback(cls, taskObj, sdUUID, pimgUUID, pvolUUID):
     """
     Rollback helper: restore the parent volume's leaf status and tear
     it down after a failed child-volume operation.
     """
     # Use lazy %-style logging arguments (consistent with the rest of
     # the module) instead of eagerly formatting the message.
     cls.log.info("parentVolumeRollback: sdUUID=%s pimgUUID=%s pvolUUID=%s",
                  sdUUID, pimgUUID, pvolUUID)
     try:
         if pvolUUID != BLANK_UUID and pimgUUID != BLANK_UUID:
             pvol = sdCache.produce(sdUUID).produceVolume(pimgUUID, pvolUUID)
             # A non-shared parent without children must be a leaf again.
             if not pvol.isShared() and not pvol.recheckIfLeaf():
                 pvol.setLeaf()
             pvol.teardown(sdUUID, pvolUUID)
     except Exception:
         # Rollback is best-effort: log and continue.
         cls.log.error("Unexpected error", exc_info=True)
Beispiel #28
0
 def getChildrenList(self):
     """
     Fetch the list of children volumes (in single image)
     """
     vols = self.getImageVolumes(self.repoPath, self.sdUUID, self.imgUUID)
     # Produce the domain once, instead of once per candidate volume.
     dom = sdCache.produce(self.sdUUID)
     return [v for v in vols
             if dom.produceVolume(self.imgUUID, v).getParent() ==
             self.volUUID]
Beispiel #29
0
    def download(self, methodArgs, sdUUID, imgUUID, volUUID=None):
        """
        Download image data into the volume, extending the volume first
        (when relevant) to the incoming image size.
        """
        dom = sdCache.produce(sdUUID)
        vol = self._activateVolumeForImportExport(dom, imgUUID, volUUID)
        try:
            # Extend the volume (if relevant) to the image size
            newSizeBlk = imageSharing.getSize(methodArgs) / volume.BLOCK_SIZE
            vol.extend(newSizeBlk)
            imageSharing.download(vol.getVolumePath(), methodArgs)
        finally:
            # Always undo the activation done above.
            dom.deactivateImage(imgUUID)
Beispiel #30
0
    def newVolumeLease(cls, sdUUID, volUUID, volPath):
        """
        Create and initialize a sanlock lease file for the volume, when
        the domain supports volume leases.
        """
        dom = sdCache.produce(sdUUID)
        procPool = oop.getProcessPool(sdUUID)

        if not dom.hasVolumeLeases():
            return
        leasePath = cls.__leaseVolumePath(volPath)
        # Allocate the lease file and make it writable before formatting
        # it as a sanlock resource.
        procPool.createSparseFile(leasePath, LEASE_FILEOFFSET)
        cls.file_setrw(leasePath, rw=True)
        sanlock.init_resource(sdUUID, volUUID,
                              [(leasePath, LEASE_FILEOFFSET)])
Beispiel #31
0
 def __cleanupMultimove(self, sdUUID, imgList, postZero=False):
     """
     Cleanup environments after multiple-move operation
     """
     for imgUUID in imgList:
         # Each image is deleted best-effort; a failure on one image
         # must not prevent cleaning up the others.
         try:
             _deleteImage(sdCache.produce(sdUUID), imgUUID, postZero)
         except se.StorageException:
             self.log.warning("Delete image failed for image: %s in SD: %s",
                              imgUUID, sdUUID, exc_info=True)
Beispiel #32
0
 def setrw(self, rw):
     """
     Set the volume read/write permission, on pre-V3 domains only.

     Since domain version 3 (V3) VDSM does not change internal volume
     permissions to read-only because it would interfere with live
     snapshots and live merge (e.g. during a live snapshot a VM running
     on the SPM would lose the ability to write to the current volume).
     However, to avoid lvm MDA corruption we still set the volume as
     read-only on domain version 2; the corruption is triggered on HSMs
     that use the resource manager to prepare the volume chain.
     """
     if int(sdCache.produce(self.sdUUID).getVersion()) < 3:
         self._setrw(rw=rw)
Beispiel #33
0
 def setrw(self, rw):
     """
     Change the volume read/write permission, but only on domain V2.
     """
     # On V3+ domains VDSM never flips internal volumes to read-only,
     # because that would break live snapshot / live merge (a VM on the
     # SPM would lose write access to its current volume). Skip early.
     if int(sdCache.produce(self.sdUUID).getVersion()) >= 3:
         return
     # V2 domains: keep setting read-only to avoid lvm MDA corruption on
     # the HSMs preparing the volume chain via the resource manager.
     self._setrw(rw=rw)
Beispiel #34
0
 def parentVolumeRollback(cls, taskObj, sdUUID, pimgUUID, pvolUUID):
     """
     Rollback: restore the parent volume leaf status (lost when a child
     was being created) and tear the parent down. Errors are logged and
     swallowed — rollback is best-effort.
     """
     # Lazy logging arguments instead of eager % formatting: the message
     # is only rendered when INFO is actually enabled.
     cls.log.info("parentVolumeRollback: sdUUID=%s pimgUUID=%s"
                  " pvolUUID=%s", sdUUID, pimgUUID, pvolUUID)
     try:
         if pvolUUID != BLANK_UUID and pimgUUID != BLANK_UUID:
             pvol = sdCache.produce(sdUUID).produceVolume(pimgUUID,
                                                          pvolUUID)
             # A non-shared parent that lost its leaf marker gets it back.
             if not pvol.isShared() and not pvol.recheckIfLeaf():
                 pvol.setLeaf()
             pvol.teardown(sdUUID, pvolUUID)
     except Exception:
         cls.log.error("Unexpected error", exc_info=True)
Beispiel #35
0
    def newVolumeLease(cls, metaId, sdUUID, volUUID):
        """
        Initialize the sanlock lease for a block volume; the lease lives
        in the domain leases file at an offset derived from the metadata
        slot.
        """
        cls.log.debug("Initializing volume lease volUUID=%s sdUUID=%s, "
                      "metaId=%s", volUUID, sdUUID, metaId)
        metaSdUUID, mdSlot = metaId
        dom = sdCache.produce(sdUUID)

        # Skip the reserved slots at the start of the leases file.
        slotNumber = mdSlot + RESERVED_LEASES
        leaseOffset = slotNumber * dom.logBlkSize * sd.LEASE_BLOCKS

        sanlock.init_resource(sdUUID, volUUID,
                              [(dom.getLeasesFilePath(), leaseOffset)])
Beispiel #36
0
 def parentVolumeRollback(cls, taskObj, sdUUID, pimgUUID, pvolUUID):
     """
     Rollback: re-check the parent volume leaf status after a failed
     child creation, then tear the parent down again.
     """
     # Lazy logging arguments instead of eager % formatting.
     cls.log.info("parentVolumeRollback: sdUUID=%s pimgUUID=%s"
                  " pvolUUID=%s", sdUUID, pimgUUID, pvolUUID)
     if pvolUUID != sc.BLANK_UUID and pimgUUID != sc.BLANK_UUID:
         pvol = sdCache.produce(sdUUID).produceVolume(pimgUUID, pvolUUID)
         pvol.prepare()
         try:
             pvol.recheckIfLeaf()
         except Exception:
             cls.log.error("Unexpected error", exc_info=True)
         finally:
             # Always undo the prepare(), even when the recheck failed.
             pvol.teardown(sdUUID, pvolUUID)
Beispiel #37
0
    def newVolumeLease(cls, metaId, sdUUID, volUUID):
        """
        Create the sanlock lease resource for a block volume inside the
        domain leases file, offset by the volume's metadata slot.
        """
        cls.log.debug(
            "Initializing volume lease volUUID=%s sdUUID=%s, "
            "metaId=%s", volUUID, sdUUID, metaId)
        dom = sdCache.produce(sdUUID)
        metaSdUUID, mdSlot = metaId

        # RESERVED_LEASES slots at the head of the file are not usable.
        leaseOffset = ((mdSlot + RESERVED_LEASES) * dom.logBlkSize *
                       sd.LEASE_BLOCKS)
        leasePath = dom.getLeasesFilePath()

        sanlock.init_resource(sdUUID, volUUID, [(leasePath, leaseOffset)])
Beispiel #38
0
 def getImageVolumes(cls, repoPath, sdUUID, imgUUID):
     """
     Fetch the list of the Volumes UUIDs, not including the shared base
     (template).
     """
     # The nested os.path.join() call was redundant; one call suffices.
     pattern = os.path.join(repoPath, sdUUID, sd.DOMAIN_IMAGES, imgUUID,
                            "*.meta")
     files = oop.getProcessPool(sdUUID).glob.glob(pattern)
     # Hoist the loop-invariant domain lookup out of the loop.
     dom = sdCache.produce(sdUUID)
     volList = []
     for metaFile in files:
         volid = os.path.splitext(os.path.basename(metaFile))[0]
         # Skip volumes recorded as belonging to another image (e.g. a
         # shared template linked into this image directory).
         if dom.produceVolume(imgUUID, volid).getImage() == imgUUID:
             volList.append(volid)
     return volList
Beispiel #39
0
 def parentVolumeRollback(cls, taskObj, sdUUID, pimgUUID, pvolUUID):
     """
     Rollback: prepare the parent volume, re-check its leaf status, and
     tear it down again after a failed child-volume operation.
     """
     # Lazy logging args: only format the message if INFO is enabled.
     cls.log.info("parentVolumeRollback: sdUUID=%s pimgUUID=%s"
                  " pvolUUID=%s", sdUUID, pimgUUID, pvolUUID)
     if pvolUUID != sc.BLANK_UUID and pimgUUID != sc.BLANK_UUID:
         pvol = sdCache.produce(sdUUID).produceVolume(pimgUUID,
                                                      pvolUUID)
         pvol.prepare()
         try:
             pvol.recheckIfLeaf()
         except Exception:
             cls.log.error("Unexpected error", exc_info=True)
         finally:
             # Balance the prepare() above regardless of the outcome.
             pvol.teardown(sdUUID, pvolUUID)
Beispiel #40
0
    def validateVolumePath(self):
        """
        In file volume repositories, the volume file and the volume
        metadata must exist after the image/volume is created.

        Sets self.volumePath on success.

        Raises:
            se.VolumeDoesNotExist: if the volume file is missing.
        """
        # Lazy logging args instead of eager % formatting.
        self.log.debug("validate path for %s", self.volUUID)
        if not self.imagePath:
            self.validateImagePath()
        volPath = os.path.join(self.imagePath, self.volUUID)
        if not self.oop.fileUtils.pathExists(volPath):
            raise se.VolumeDoesNotExist(self.volUUID)

        self.volumePath = volPath
        # ISO domains keep no per-volume metadata file to validate.
        if not sdCache.produce(self.sdUUID).isISO():
            self.validateMetaVolumePath()
Beispiel #41
0
    def validateVolumePath(self):
        """
        Verify that the volume file (and, on non-ISO domains, its
        metadata) exists, then record the path in self.volumePath.

        Raises:
            se.VolumeDoesNotExist: if the volume file is missing.
        """
        # Use lazy logging arguments rather than formatting with %.
        self.log.debug("validate path for %s", self.volUUID)
        if not self.imagePath:
            self.validateImagePath()
        volPath = os.path.join(self.imagePath, self.volUUID)
        if not self.oop.fileUtils.pathExists(volPath):
            raise se.VolumeDoesNotExist(self.volUUID)

        self.volumePath = volPath
        # ISO domains have no volume metadata files, so skip that check.
        if not sdCache.produce(self.sdUUID).isISO():
            self.validateMetaVolumePath()
Beispiel #42
0
    def _share(self, dstImgPath):
        """
        Share this volume to dstImgPath, including the metadata and the
        lease (the lease only when the domain uses sanlock leases).
        """
        targetVolPath = os.path.join(dstImgPath, self.volUUID)
        targetMetaPath = self._getMetaVolumePath(targetVolPath)

        self.log.debug("Share volume %s to %s", self.volUUID, dstImgPath)
        self.oop.utils.forceLink(self.getVolumePath(), targetVolPath)

        self.log.debug("Share volume metadata of %s to %s", self.volUUID, dstImgPath)
        self.oop.utils.forceLink(self._getMetaVolumePath(), targetMetaPath)

        # Link the lease file if the domain uses sanlock
        domain = sdCache.produce(self.sdUUID)
        if domain.hasVolumeLeases():
            self._shareLease(dstImgPath)
Beispiel #43
0
 def getImageVolumes(cls, repoPath, sdUUID, imgUUID):
     """
     Fetch the list of the Volumes UUIDs,
     not including the shared base (template)
     """
     # Get Volumes of an image
     pattern = os.path.join(repoPath, sdUUID, sd.DOMAIN_IMAGES, imgUUID,
                            "*.meta")
     files = oop.getProcessPool(sdUUID).glob.glob(pattern)
     # Produce the domain once instead of once per metadata file; the
     # lookup is loop-invariant.
     dom = sdCache.produce(sdUUID)
     volList = []
     for metaFile in files:
         volid = os.path.splitext(os.path.basename(metaFile))[0]
         # Keep only volumes whose recorded image matches imgUUID.
         if dom.produceVolume(imgUUID, volid).getImage() == imgUUID:
             volList.append(volid)
     return volList
Beispiel #44
0
    def getVmVolumeInfo(self):
        """
        Send info to represent Gluster volume as a network block device
        """
        rpath = sdCache.produce(self.sdUUID).getRemotePath()
        rpath_list = rpath.rsplit(":", 1)
        volfileServer = rpath_list[0]
        volname = rpath_list[1]

        # Volume transport to Libvirt transport mapping
        VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

        # Extract the volume's transport using gluster cli
        svdsmProxy = svdsm.getProxy()

        try:
            volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
            volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
        except GlusterException:
            # In case of issues with finding transport type, default to tcp
            # BUGFIX: the format arguments were passed as a single tuple
            # for two %s placeholders, which broke the log substitution.
            # Pass them as separate lazy logging arguments instead.
            self.log.warning(
                "Unable to find transport type for GlusterFS"
                " volume %s. GlusterFS server = %s."
                "Defaulting to tcp", volname, volfileServer,
                exc_info=True)
            volTrans = VOLUME_TRANS_MAP['TCP']

        # Use default port
        volPort = "0"

        imgFilePath = self.getVolumePath()
        imgFilePath_list = imgFilePath.rsplit("/")

        # Extract path to the image, relative to the gluster mount
        imgFileRelPath = "/".join(imgFilePath_list[-4:])

        glusterPath = volname + '/' + imgFileRelPath

        return {
            'volType': VmVolumeInfo.TYPE_NETWORK,
            'path': glusterPath,
            'protocol': 'gluster',
            'volPort': volPort,
            'volTransport': volTrans,
            'volfileServer': volfileServer
        }
Beispiel #45
0
    def createVolumeRollback(cls, taskObj, repoPath, sdUUID, imgUUID, volUUID,
                             imageDir):
        """
        Rollback for volume creation: delete the new volume and, when it
        was the last volume of a standalone image, remove the image
        directory with all its leftovers.
        """
        # Lazy logging args instead of eager % formatting.
        cls.log.info("createVolumeRollback: repoPath=%s sdUUID=%s imgUUID=%s "
                     "volUUID=%s imageDir=%s",
                     repoPath, sdUUID, imgUUID, volUUID, imageDir)
        vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
        pvol = vol.getParentVolume()
        # Remove volume
        vol.delete(postZero=False, force=True)
        # Truthiness instead of len(): any remaining volume keeps the dir.
        if cls.getImageVolumes(repoPath, sdUUID, imgUUID):
            # Don't remove the image folder itself
            return

        if not pvol or pvol.isShared():
            # Remove image folder with all leftovers
            if os.path.exists(imageDir):
                fileUtils.cleanupdir(imageDir)
Beispiel #46
0
    def _share(self, dstImgPath):
        """
        Hard-link this volume and its metadata into dstImgPath; link the
        lease too when the domain uses sanlock leases.
        """
        linkedVolPath = os.path.join(dstImgPath, self.volUUID)
        linkedMetaPath = self._getMetaVolumePath(linkedVolPath)

        self.log.debug("Share volume %s to %s", self.volUUID, dstImgPath)
        self.oop.utils.forceLink(self.getVolumePath(), linkedVolPath)

        self.log.debug("Share volume metadata of %s to %s", self.volUUID,
                       dstImgPath)
        self.oop.utils.forceLink(self._getMetaVolumePath(), linkedMetaPath)

        # Link the lease file if the domain uses sanlock
        if sdCache.produce(self.sdUUID).hasVolumeLeases():
            self._shareLease(dstImgPath)
Beispiel #47
0
    def createVolumeRollback(cls, taskObj, repoPath,
                             sdUUID, imgUUID, volUUID, imageDir):
        """
        Undo a volume creation: delete the volume; if no volumes remain
        and the image is standalone (no shared parent in another image),
        remove the image directory as well.
        """
        # Pass the values as lazy logging arguments instead of eagerly
        # formatting the message with %.
        cls.log.info("createVolumeRollback: repoPath=%s sdUUID=%s imgUUID=%s "
                     "volUUID=%s imageDir=%s",
                     repoPath, sdUUID, imgUUID, volUUID, imageDir)
        vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
        pvol = vol.getParentVolume()
        # Remove volume
        vol.delete(postZero=False, force=True)
        # An empty list is falsy; no need for len().
        if cls.getImageVolumes(repoPath, sdUUID, imgUUID):
            # Don't remove the image folder itself
            return

        if not pvol or pvol.isShared():
            # Remove image folder with all leftovers
            if os.path.exists(imageDir):
                fileUtils.cleanupdir(imageDir)
Beispiel #48
0
    def _monitorDomain(self):
        self.nextStatus.clear()

        if time() - self.lastRefresh > self.refreshTime:
            # Refreshing the domain object in order to pick up changes as,
            # for example, the domain upgrade.
            self.log.debug("Refreshing domain %s", self.sdUUID)
            sdCache.manuallyRemoveDomain(self.sdUUID)
            self.lastRefresh = time()

        try:
            # We should produce the domain inside the monitoring loop because
            # it might take some time and we don't want to slow down the thread
            # start (and anything else that relies on that as for example
            # updateMonitoringThreads). It also needs to be inside the loop
            # since it might fail and we want keep trying until we succeed or
            # the domain is deactivated.
            if self.domain is None:
                self.domain = sdCache.produce(self.sdUUID)

            if self.isIsoDomain is None:
                # The isIsoDomain assignment is delayed because the isoPrefix
                # discovery might fail (if the domain suddenly disappears) and
                # we could risk to never try to set it again.
                isIsoDomain = self.domain.isISO()
                if isIsoDomain:
                    self.isoPrefix = self.domain.getIsoDomainImagesDir()
                self.isIsoDomain = isIsoDomain

            self.domain.selftest()

            self.nextStatus.readDelay = self.domain.getReadDelay()

            stats = self.domain.getStats()
            self.nextStatus.diskUtilization = (stats["disktotal"],
                                               stats["diskfree"])

            self.nextStatus.vgMdUtilization = (stats["mdasize"],
                                               stats["mdafree"])

            self.nextStatus.vgMdHasEnoughFreeSpace = stats["mdavalid"]
            self.nextStatus.vgMdFreeBelowThreashold = stats["mdathreshold"]

            masterStats = self.domain.validateMaster()
            self.nextStatus.masterValid = masterStats['valid']
            self.nextStatus.masterMounted = masterStats['mount']

            self.nextStatus.hasHostId = self.domain.hasHostId(self.hostId)
            self.nextStatus.isoPrefix = self.isoPrefix
            self.nextStatus.version = self.domain.getVersion()

        except Exception as e:
            self.log.error(
                "Error while collecting domain %s monitoring "
                "information",
                self.sdUUID,
                exc_info=True)
            self.nextStatus.error = e

        self.nextStatus.checkTime = time()
        self.nextStatus.valid = (self.nextStatus.error is None)

        if self._statusDidChange():
            self.log.debug("Domain %s changed its status to %s", self.sdUUID,
                           "Valid" if self.nextStatus.valid else "Invalid")

            try:
                self.domainMonitor.onDomainStateChange.emit(
                    self.sdUUID, self.nextStatus.valid)
            except:
                self.log.warn("Could not emit domain state change event",
                              exc_info=True)

        self.firstChange = False

        # An ISO domain can be shared by multiple pools
        if (not self.isIsoDomain and self.nextStatus.valid
                and self.nextStatus.hasHostId is False):
            try:
                self.domain.acquireHostId(self.hostId, async=True)
            except:
                self.log.debug(
                    "Unable to issue the acquire host id %s "
                    "request for domain %s",
                    self.hostId,
                    self.sdUUID,
                    exc_info=True)

        self.status.update(self.nextStatus)
Beispiel #49
0
    def __getResourceCandidatesList(self, resourceName, lockType):
        """
        Return list of lock candidates (template and volumes)

        Acquires a resource lock for every volume in the image chain (and
        for the template the chain is based on, if any) and returns the
        acquired resources. On any acquisition failure, every resource
        acquired so far is released and the exception is re-raised.
        """
        volResourcesList = []
        template = None
        dom = sdCache.produce(sdUUID=self.sdUUID)
        # Get the list of the volumes
        repoPath = os.path.join(self.storage_repository, dom.getPools()[0])
        try:
            chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                                   imgUUID=resourceName)
        except se.ImageDoesNotExistInSD:
            # A missing image simply yields no candidates to lock.
            log.debug("Image %s does not exist in domain %s", resourceName,
                      self.sdUUID)
            return []

        # check if the chain is build above a template, or it is a standalone
        pvol = chain[0].getParentVolume()
        if pvol:
            template = pvol.volUUID
        elif chain[0].isShared():
            # Image of template itself,
            # with no other volumes in chain
            template = chain[0].volUUID
            del chain[:]

        volUUIDChain = [vol.volUUID for vol in chain]
        volUUIDChain.sort()

        # Activate all volumes in chain at once.
        # We will attempt to activate all volumes again down to the flow with
        # no consequence, since they are already active.
        # TODO Fix resource framework to hold images, instead of specific vols.
        # This assumes that chains can not spread into more than one SD.
        if dom.__class__.__name__ == "BlockStorageDomain":
            lvm.activateLVs(self.sdUUID, volUUIDChain)

        failed = False
        # Acquire template locks:
        # - 'lockType' for template's image itself
        # - Always 'shared' lock for image based on template
        try:
            if template:
                if len(volUUIDChain) > 0:
                    # The image is based on the template: a shared lock on
                    # the template is enough.
                    volRes = rm.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        rm.SHARED,
                        timeout=self.resource_default_timeout)
                else:
                    # The image IS the template itself: honor the caller's
                    # requested lock type.
                    volRes = rm.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        lockType,
                        timeout=self.resource_default_timeout)
                volResourcesList.append(volRes)

            # Acquire 'lockType' volume locks
            for volUUID in volUUIDChain:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace,
                    volUUID,
                    lockType,
                    timeout=self.resource_default_timeout)

                volResourcesList.append(volRes)
        except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
            log.debug("Cannot acquire volume resource (%s)", str(e))
            failed = True
            raise
        except Exception:
            log.debug("Cannot acquire volume resource", exc_info=True)
            failed = True
            raise
        finally:
            # The "failed" flag lets the finally clause distinguish the
            # error path (roll back partial acquisitions) from success.
            if failed:
                # Release already acquired template/volumes locks
                for volRes in volResourcesList:
                    volRes.release()

        return volResourcesList
Beispiel #50
0
    def create(cls,
               repoPath,
               sdUUID,
               imgUUID,
               size,
               volFormat,
               preallocate,
               diskType,
               volUUID,
               desc,
               srcImgUUID,
               srcVolUUID,
               initialSize=None):
        """
        Create a new volume with given size or snapshot
            'size' - in sectors
            'volFormat' - volume format COW / RAW
            'preallocate' - Preallocate / Sparse
            'diskType' - enum (API.Image.DiskTypes)
            'srcImgUUID' - source image UUID
            'srcVolUUID' - source volume UUID
            'initialSize' - initial volume size in sectors,
                            in case of thin provisioning

        Returns the new volume's UUID. Recovery steps are pushed on the
        task so a failure at any point rolls the creation back.

        Raises:
            se.VolumeCannotGetParent: the source volume was unusable.
            se.VolumeCreationError: the volume could not be created, or a
                raw volume came out smaller than requested.
        """
        dom = sdCache.produce(sdUUID)
        dom.validateCreateVolumeParams(volFormat,
                                       srcVolUUID,
                                       preallocate=preallocate)

        imgPath = image.Image(repoPath).create(sdUUID, imgUUID)

        volPath = os.path.join(imgPath, volUUID)
        volParent = None
        volType = sc.type2name(sc.LEAF_VOL)

        # Get the specific class name and class module to be used in the
        # Recovery tasks.
        clsModule, clsName = cls._getModuleAndClass()

        try:
            if srcVolUUID != sc.BLANK_UUID:
                # When the srcImgUUID isn't specified we assume it's the same
                # as the imgUUID
                if srcImgUUID == sc.BLANK_UUID:
                    srcImgUUID = imgUUID

                volParent = cls(repoPath, sdUUID, srcImgUUID, srcVolUUID)

                if not volParent.isLegal():
                    raise se.createIllegalVolumeSnapshotError(
                        volParent.volUUID)

                if imgUUID != srcImgUUID:
                    # Snapshot from a volume in another image (template):
                    # share it into this image first.
                    volParent.share(imgPath)
                    volParent = cls(repoPath, sdUUID, imgUUID, srcVolUUID)

                # Override the size with the size of the parent
                size = volParent.getSize()

        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCannotGetParent(
                "Couldn't get parent %s for volume %s: %s" %
                (srcVolUUID, volUUID, e))

        try:
            cls.log.info("Creating volume %s", volUUID)

            # Rollback sentinel to mark the start of the task
            vars.task.pushRecovery(
                task.Recovery(task.ROLLBACK_SENTINEL, clsModule, clsName,
                              "startCreateVolumeRollback",
                              [sdUUID, imgUUID, volUUID]))

            # Create volume rollback
            vars.task.pushRecovery(
                task.Recovery("Halfbaked volume rollback", clsModule, clsName,
                              "halfbakedVolumeRollback",
                              [sdUUID, volUUID, volPath]))

            # Specific volume creation (block, file, etc...)
            try:
                metaId = cls._create(dom,
                                     imgUUID,
                                     volUUID,
                                     size,
                                     volFormat,
                                     preallocate,
                                     volParent,
                                     srcImgUUID,
                                     srcVolUUID,
                                     volPath,
                                     initialSize=initialSize)
            except (se.VolumeAlreadyExists, se.CannotCreateLogicalVolume,
                    se.VolumeCreationError, se.InvalidParameterException) as e:
                cls.log.error("Failed to create volume %s: %s", volPath, e)
                # Nothing half-created remains, so drop the halfbaked
                # rollback pushed above.
                vars.task.popRecovery()
                raise
            # When the volume format is raw what the guest sees is the apparent
            # size of the file/device therefore if the requested size doesn't
            # match the apparent size (eg: physical extent granularity in LVM)
            # we need to update the size value so that the metadata reflects
            # the correct state.
            if volFormat == sc.RAW_FORMAT:
                apparentSize = int(
                    dom.getVSize(imgUUID, volUUID) / sc.BLOCK_SIZE)
                if apparentSize < size:
                    cls.log.error(
                        "The volume %s apparent size %s is smaller "
                        "than the requested size %s", volUUID, apparentSize,
                        size)
                    raise se.VolumeCreationError()
                if apparentSize > size:
                    cls.log.info(
                        "The requested size for volume %s doesn't "
                        "match the granularity on domain %s, "
                        "updating the volume size from %s to %s", volUUID,
                        sdUUID, size, apparentSize)
                    size = apparentSize

            # NOTE(review): under Python 3 map() returns a lazy iterator;
            # confirm Recovery accepts that here or wrap in list(...).
            vars.task.pushRecovery(
                task.Recovery("Create volume metadata rollback", clsModule,
                              clsName, "createVolumeMetadataRollback",
                              map(str, metaId)))

            cls.newMetadata(metaId, sdUUID, imgUUID, srcVolUUID, size,
                            sc.type2name(volFormat), sc.type2name(preallocate),
                            volType, diskType, desc, sc.LEGAL_VOL)

            if dom.hasVolumeLeases():
                cls.newVolumeLease(metaId, sdUUID, volUUID)

        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCreationError("Volume creation %s failed: %s" %
                                         (volUUID, e))

        # Remove the rollback for the halfbaked volume
        vars.task.replaceRecoveries(
            task.Recovery("Create volume rollback", clsModule, clsName,
                          "createVolumeRollback",
                          [repoPath, sdUUID, imgUUID, volUUID, imgPath]))

        return volUUID
Beispiel #51
0
 def getVTrueSize(cls, sdUUID, imgUUID, volUUID, bs=512):
     """
     Return allocated volume size
     """
     dom = sdCache.produce(sdUUID=sdUUID)
     volClass = dom.getVolumeClass()
     return volClass.getVTrueSize(dom, imgUUID, volUUID, bs)