Example #1
 def newVolumeLease(cls, metaId, sdUUID, volUUID):
     cls.log.debug(
         "Initializing volume lease volUUID=%s sdUUID=%s, "
         "metaId=%s", volUUID, sdUUID, metaId)
     _, slot = metaId
     sd = sdCache.produce_manifest(sdUUID)
     sd.create_volume_lease(slot, volUUID)
Example #2
    def _putMetadata(cls, metaId, meta, **overrides):
        sd = sdCache.produce_manifest(meta.domain)

        _, slot = metaId

        data = meta.storage_format(sd.getVersion(), **overrides)
        data = data.ljust(sc.METADATA_SIZE, b"\0")
        sd.write_metadata_block(slot, data)
Example #3
 def createVolumeMetadataRollback(cls, taskObj, sdUUID, slot_str):
     """
     This function is called only from the tasks framework, with string values.
     """
     cls.log.info("Metadata rollback for sdUUID=%s slot=%s", sdUUID,
                  slot_str)
     sd = sdCache.produce_manifest(sdUUID)
     sd.clear_metadata_block(int(slot_str))
Example #4
    def _putMetadata(cls, metaId, meta, **overrides):
        sd = sdCache.produce_manifest(meta.domain)

        _, slot = metaId

        data = meta.storage_format(sd.getVersion(), **overrides)
        data += "\0" * (sc.METADATA_SIZE - len(data))
        sd.write_metadata_block(slot, data)
Example #5
 def getVolumeSize(self):
     """
     Return the volume size in bytes.
     """
     # Just call the SD Manifest method getVSize() - apparently it does what
     # we need. We consider incurred overhead of producing the object
     # to be a small price for code de-duplication.
     manifest = sdCache.produce_manifest(self.sdUUID)
     return manifest.getVSize(self.imgUUID, self.volUUID)
Example #6
 def locks(self):
     img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
     ret = [rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE)]
     dom = sdCache.produce_manifest(self.sd_id)
     if dom.hasVolumeLeases():
         ret.append(volume.VolumeLease(self._host_id, self.sd_id,
                                       self.img_id, self.vol_id))
     return ret
Example #7
 def getVolumeSize(self, bs=BLOCK_SIZE):
     """
     Return the volume size in blocks
     """
     # Just call the SD Manifest method getVSize() - apparently it does what
     # we need. We consider incurred overhead of producing the object
     # to be a small price for code de-duplication.
     manifest = sdCache.produce_manifest(self.sdUUID)
     return int(manifest.getVSize(self.imgUUID, self.volUUID) // bs)
Example #8
 def _validate(self):
     if self._vol_info.volume.getFormat() != sc.COW_FORMAT:
         raise Error(self._vol_info.vol_id, "volume is not COW format")
     if self._vol_info.volume.isShared():
         raise Error(self._vol_info.vol_id, "volume is shared")
     sd = sdCache.produce_manifest(self._vol_info.sd_id)
     if not sd.supports_qcow2_compat(self._qcow2_attr.compat):
         raise Error(self._vol_info.vol_id,
                     "storage domain %s does not support compat %s" %
                     (self._vol_info.sd_id, self._qcow2_attr.compat))
Example #9
 def locks(self):
     img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
     ret = [rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE)]
     dom = sdCache.produce_manifest(self.sd_id)
     if dom.hasVolumeLeases():
         # We take only the base lease since no other volumes are modified
         ret.append(volume.VolumeLease(self.host_id, self.sd_id,
                                       self.img_id, self.base_id))
     return ret
Example #10
 def locks(self):
     img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
     mode = rm.EXCLUSIVE if self._writable else rm.SHARED
     ret = [rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(img_ns, self.img_id, mode)]
     if self._writable:
         dom = sdCache.produce_manifest(self.sd_id)
         if dom.hasVolumeLeases():
             ret.append(volume.VolumeLease(self._host_id, self.sd_id,
                                           self.img_id, self.vol_id))
     return ret
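Lock lists like the one above are consumed by the guarded locking helper seen in Example #13 (guarded.context), which acquires all locks before the operation runs and releases them on exit. A minimal hedged sketch of the calling side; the method body and _operate_on_volume are illustrative, not taken from this listing:

    def _run(self):
        # Acquire the storage/image locks (and the volume lease, if any)
        # in one step; everything is released when the block exits.
        with guarded.context(self.locks()):
            self._operate_on_volume()  # illustrative placeholder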
Example #11
 def locks(self):
     img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
     mode = rm.EXCLUSIVE if self._writable else rm.SHARED
     ret = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.Lock(img_ns, self.img_id, mode)]
     if self._writable:
         dom = sdCache.produce_manifest(self.sd_id)
         if dom.hasVolumeLeases():
             ret.append(volume.VolumeLease(self._host_id, self.sd_id,
                                           self.img_id, self.vol_id))
     return ret
Example #12
    def _putMetadata(cls, metaId, meta):
        vgname, offs = metaId

        data = cls.formatMetadata(meta)
        data += "\0" * (sc.METADATA_SIZE - len(data))

        sd = sdCache.produce_manifest(vgname)
        metavol = sd.metadata_volume_path()
        with directio.DirectFile(metavol, "r+") as f:
            f.seek(offs * sc.METADATA_SIZE)
            f.write(data)
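The seek target is simply the slot index times the fixed metadata block size. A hedged arithmetic check (sc.METADATA_SIZE is assumed to be 512 bytes here, matching the traditional block size):

    METADATA_SIZE = 512        # assumed value of sc.METADATA_SIZE
    offs = 4                   # example slot index
    # Slot 4's metadata block would start at byte 2048.
    assert offs * METADATA_SIZE == 2048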
Example #13
def finalize(subchain):
    """
    During finalize we distinguish between leaf merge and internal merge.

    In case of leaf merge, we only update vdsm metadata, i.e. we call
    syncVolumeChain that marks the top volume as ILLEGAL. If the operation
    succeeds, the top volume is marked as ILLEGAL and will be removed by the
    engine. In case of failure, if the top volume is LEGAL, the user can
    recover by retrying cold merge. If the top volume is ILLEGAL, and the
    engine fails to delete the volume, a manual recovery is required.

    In case of internal merge, we need to update qcow metadata and vdsm
    metadata. For qcow metadata, we rebase top's child on base, and for vdsm
    metadata, we invoke syncVolumeChain that changes the child of the top to
    point to the base as its parent.  As we would like to minimize the window
    where the top volume is ILLEGAL, we set it to ILLEGAL just before calling
    qemuimg rebase.

    After finalize internal merge, there are three possible states:
    1. top volume illegal, qemu and vdsm chains updated. The operation will be
       finished by the engine deleting the top volume.
    2. top volume is ILLEGAL but not rebased, both qemu chain and vdsm chain
       are synchronized. Manual recovery is possible by inspecting the chains
       and setting the top volume to legal.
    3. top volume is ILLEGAL, qemu chain rebased, but vdsm chain wasn't
       modified or partly modified. Manual recovery is possible by updating
       vdsm chain.
    """
    log.info("Finalizing subchain after merge: %s", subchain)
    with guarded.context(subchain.locks):
        # TODO: As each cold merge step - prepare, merge and finalize -
        # requires different volumes to be prepared, we will add a prepare
        # helper for each step.
        with subchain.prepare():
            subchain.validate()
            dom = sdCache.produce_manifest(subchain.sd_id)
            if subchain.top_vol.isLeaf():
                _finalize_leaf_merge(dom, subchain)
            else:
                _finalize_internal_merge(dom, subchain)

            if subchain.base_vol.can_reduce():
                # If the top volume is leaf, the base volume will become a leaf
                # after the top volume is deleted.
                optimal_size = subchain.base_vol.optimal_size(
                    as_leaf=subchain.top_vol.isLeaf())
                actual_size = subchain.base_vol.getVolumeSize()

        # Optimal size must be computed while the image is prepared, but
        # reducing with the volume still active will issue a warning from LVM.
        # Thus, reduce only after tearing down the volume.
        if subchain.base_vol.can_reduce() and optimal_size < actual_size:
            _shrink_base_volume(subchain, optimal_size)
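The docstring above fixes the ordering for the internal-merge case: the top volume is flipped to ILLEGAL as late as possible, immediately before the qcow rebase, and the vdsm chain is synced afterwards. A hedged reconstruction of that ordering; the real body of _finalize_internal_merge is not shown in this listing and the helper names are illustrative:

    def _finalize_internal_merge(dom, subchain):
        # 1. Minimize the ILLEGAL window: mark top ILLEGAL only now.
        subchain.top_vol.setLegality(sc.ILLEGAL_VOL)
        # 2. Update qcow metadata: rebase top's child onto base
        #    (wraps qemuimg rebase).
        _rebase_child_on_base(subchain)
        # 3. Update vdsm metadata: point top's child at base as its parent
        #    (wraps syncVolumeChain).
        _sync_volume_chain(dom, subchain)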
Example #14
    def _putMetadata(cls, metaId, meta, **overrides):
        volPath, = metaId
        metaPath = cls.metaVolumePath(volPath)

        sd = sdCache.produce_manifest(meta.domain)

        data = meta.storage_format(sd.getVersion(), **overrides)

        # Write to a temporary file and rename it into place; rename() is
        # atomic on POSIX, so readers never see a half-written metadata file.
        with open(metaPath + ".new", "w") as f:
            f.write(data)

        oop.getProcessPool(meta.domain).os.rename(metaPath + ".new", metaPath)
Example #15
    def _run(self):
        sd_manifest = sdCache.produce_manifest(self.params.sd_id)
        if not sd_manifest.supports_device_reduce():
            raise se.StorageDomainVersionError(
                "move device not supported for domain version %s" %
                sd_manifest.getVersion())

        # TODO: we assume at this point that the domain isn't active and can't
        # be activated - we need to ensure that.
        with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
            with sd_manifest.domain_id(self.host_id), \
                    sd_manifest.domain_lock(self.host_id):
                sd_manifest.movePV(self.params.src_guid, self.params.dst_guids)
Example #16
    def volume_operation(self):
        dom = sdCache.produce_manifest(self.lease.sd_id)
        metadata = dom.get_lvb(self.lease.lease_id)
        log.info("Current lease %s metadata: %r", self.lease.sd_id, metadata)

        self._validate_metadata(metadata)
        try:
            yield
        except Exception:
            self._update_metadata(dom, metadata, sc.JOB_STATUS_FAILED)
            raise

        self._update_metadata(dom, metadata, sc.JOB_STATUS_SUCCEEDED)
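The bare yield indicates this method is presumably wrapped with contextlib.contextmanager, letting callers bracket the actual lease update. A hedged usage sketch; job and do_lease_update are illustrative placeholders:

    with job.volume_operation():
        # A normal exit stamps the metadata JOB_STATUS_SUCCEEDED; any
        # exception raised here stamps JOB_STATUS_FAILED and re-raises.
        do_lease_update()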
Example #17
    def _putMetadata(cls, metaId, meta, **overrides):
        volPath, = metaId
        metaPath = cls.metaVolumePath(volPath)

        sd = sdCache.produce_manifest(meta.domain)

        data = meta.storage_format(sd.getVersion(), **overrides)

        iop = oop.getProcessPool(meta.domain)
        tmpFilePath = metaPath + ".new"

        iop.writeFile(tmpFilePath, data)
        iop.os.rename(tmpFilePath, metaPath)
Example #18
 def _run(self):
     sd_manifest = sdCache.produce_manifest(self.params.sd_id)
     if not sd_manifest.supports_device_reduce():
         raise se.UnsupportedOperation(
             "Storage domain does not support reduce operation",
             sdUUID=sd_manifest.sdUUID(),
             sdType=sd_manifest.getStorageType())
     # TODO: we assume at this point that the domain isn't active and can't
     # be activated - we need to ensure that.
     with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
         with sd_manifest.domain_id(self.host_id), \
                 sd_manifest.domain_lock(self.host_id):
             sd_manifest.reduceVG(self.params.guid)
Example #19
    def newVolumeLease(cls, metaId, sdUUID, volUUID):
        cls.log.debug(
            "Initializing volume lease volUUID=%s sdUUID=%s, "
            "metaId=%s", volUUID, sdUUID, metaId)
        volPath = metaId[0]
        leasePath = cls.leaseVolumePath(volPath)
        oop.getProcessPool(sdUUID).truncateFile(leasePath, LEASE_FILEOFFSET)
        cls.file_setrw(leasePath, rw=True)

        manifest = sdCache.produce_manifest(sdUUID)
        sanlock.write_resource(sdUUID,
                               volUUID, [(leasePath, LEASE_FILEOFFSET)],
                               align=manifest.alignment,
                               sector=manifest.block_size)
Example #20
    def validateImagePath(self):
        """
        Validate that the image dir exists and is valid.
        In file volume repositories, the image dir must exist
        after its first volume has been created.
        """
        manifest = sdCache.produce_manifest(self.sdUUID)
        imageDir = manifest.getImageDir(self.imgUUID)

        if not self.oop.os.path.isdir(imageDir):
            raise se.ImagePathError(imageDir)
        if not self.oop.os.access(imageDir, os.R_OK | os.W_OK | os.X_OK):
            raise se.ImagePathError(imageDir)
        self._imagePath = imageDir
Example #21
 def prepare(self):
     top_index = self.chain.index(self.top_id)
     chain_to_prepare = self.chain[:top_index + 1]
     dom = sdCache.produce_manifest(self.sd_id)
     for vol_id in chain_to_prepare:
         vol = dom.produceVolume(self.img_id, vol_id)
         rw = vol_id == self.base_id
         # TODO: improve this later to use subchain.top_vol and
         # subchain.base_vol.
         vol.prepare(rw=rw, justme=True)
     try:
         yield
     finally:
         self.top_vol.teardown(self.sd_id, self.top_id)
Example #22
 def getImageVolumes(cls, sdUUID, imgUUID):
     """
     Fetch the list of volume UUIDs,
     not including the shared base (template).
     """
     sd = sdCache.produce_manifest(sdUUID)
     img_dir = sd.getImageDir(imgUUID)
     pattern = os.path.join(glob_escape(img_dir), "*.meta")
     files = oop.getProcessPool(sdUUID).glob.glob(pattern)
     volList = []
     for i in files:
         volid = os.path.splitext(os.path.basename(i))[0]
         if sd.produceVolume(imgUUID, volid).getImage() == imgUUID:
             volList.append(volid)
     return volList
Example #23
def _extend_base_allocation(base_vol, top_vol):
    if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT):
        return

    base_alloc = base_vol.getVolumeSize(bs=1)
    top_alloc = top_vol.getVolumeSize(bs=1)
    vol_chunk_size = (config.getint('irs', 'volume_utilization_chunk_mb') *
                      constants.MEGAB)
    potential_alloc = base_alloc + top_alloc + vol_chunk_size
    # TODO: add chunk_size only if top is leaf.
    capacity = base_vol.getSize() * sc.BLOCK_SIZE
    max_alloc = utils.round(capacity * sc.COW_OVERHEAD, constants.MEGAB)
    actual_alloc = min(potential_alloc, max_alloc)
    # Ceiling division to whole megabytes (// keeps the result an int).
    actual_alloc_mb = (actual_alloc + constants.MEGAB - 1) // constants.MEGAB
    dom = sdCache.produce_manifest(base_vol.sdUUID)
    dom.extendVolume(base_vol.volUUID, actual_alloc_mb)
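A hedged numeric walk-through of the sizing formula above; all figures are invented and sc.COW_OVERHEAD is assumed to be 1.1:

    MiB = 1024 ** 2
    GiB = 1024 ** 3

    base_alloc = 2 * GiB    # current base allocation
    top_alloc = 1 * GiB     # current top allocation
    chunk = 1 * GiB         # volume_utilization_chunk_mb = 1024
    capacity = 10 * GiB     # virtual size of the base volume
    cow_overhead = 1.1      # assumed sc.COW_OVERHEAD

    potential = base_alloc + top_alloc + chunk     # 4 GiB
    max_alloc = capacity * cow_overhead            # 11 GiB
    actual = min(potential, max_alloc)             # 4 GiB
    print((int(actual) + MiB - 1) // MiB)          # 4096 MB -> extendVolume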
Example #24
File: merge.py Project: nirs/vdsm
def finalize(subchain):
    """
    During finalize we distinguish between leaf merge and internal merge.

    In case of leaf merge, we only update vdsm metadata, i.e. we call
    syncVolumeChain that marks the top volume as ILLEGAL. If the operation
    succeeds, the top volume is marked as ILLEGAL and will be removed by the
    engine. In case of failure, if the top volume is LEGAL, the user can
    recover by retrying cold merge. If the top volume is ILLEGAL, and the
    engine fails to delete the volume, a manual recovery is required.

    In case of internal merge, we need to update qcow metadata and vdsm
    metadata. For qcow metadata, we rebase top's child on base, and for vdsm
    metadata, we invoke syncVolumeChain that changes the child of the top to
    point to the base as its parent.  As we would like to minimize the window
    where the top volume is ILLEGAL, we set it to ILLEGAL just before calling
    qemuimg rebase.

    After finalize internal merge, there are three possible states:
    1. top volume illegal, qemu and vdsm chains updated. The operation will be
       finished by the engine deleting the top volume.
    2. top volume is ILLEGAL but not rebased, both qemu chain and vdsm chain
       are synchronized. Manual recovery is possible by inspecting the chains
       and setting the top volume to legal.
    3. top volume is ILLEGAL, qemu chain rebased, but vdsm chain wasn't
       modified or partly modified. Manual recovery is possible by updating
       vdsm chain.
    """
    log.info("Finalizing subchain after merge: %s", subchain)
    with guarded.context(subchain.locks):
        # TODO: As each cold merge step - prepare, merge and finalize -
        # requires different volumes to be prepared, we will add a prepare
        # helper for each step.
        with subchain.prepare():
            subchain.validate()
            dom = sdCache.produce_manifest(subchain.sd_id)
            if subchain.top_vol.isLeaf():
                _finalize_leaf_merge(dom, subchain)
            else:
                _finalize_internal_merge(dom, subchain)

            if subchain.base_vol.chunked():
                # optimal_size must be called when the volume is prepared
                optimal_size = subchain.base_vol.optimal_size()

        if subchain.base_vol.chunked():
            _shrink_base_volume(subchain, optimal_size)
Example #25
    def validateVolumePath(self):
        """
        In file volume repositories,
        the volume file and the volume metadata must exist after
        the image/volume is created.
        """
        self.log.debug("validate path for %s" % self.volUUID)
        if not self.imagePath:
            self.validateImagePath()
        volPath = os.path.join(self.imagePath, self.volUUID)
        if not self.oop.fileUtils.pathExists(volPath):
            raise se.VolumeDoesNotExist(self.volUUID)

        self._volumePath = volPath
        sd = sdCache.produce_manifest(self.sdUUID)
        if not sd.isISO():
            self.validateMetaVolumePath()
Example #26
    def getMetadata(self, metaId=None):
        """
        Get the metadata as an array of key=value lines.
        """
        if not metaId:
            metaId = self.getMetadataId()

        _, slot = metaId
        sd = sdCache.produce_manifest(self.sdUUID)
        try:
            lines = sd.read_metadata_block(slot).splitlines()
        except Exception as e:
            self.log.error(e, exc_info=True)
            raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))

        md = VolumeMetadata.from_lines(lines)
        return md
Example #27
File: merge.py Project: nirs/vdsm
 def chain(self):
     if self._chain is None:
         dom = sdCache.produce_manifest(self.sd_id)
         repoPath = dom.getRepoPath()
         image_repo = image.Image(repoPath)
         chain = image_repo.getChain(self.sd_id, self.img_id)
         # When the VM is cloned from a template, the root volume of the
         # volumes chain is a shared volume. Shared volumes are not returned
         # in the volumes list when calling Image.getChain; hence, we have
         # to add that volume manually.
         template = chain[0].getParentVolume()
         if template is not None:
             if not template.isShared():
                 raise se.UnexpectedVolumeState(
                     template.volUUID, "Shared", "Not Shared")
             chain.insert(0, template)
         self._chain = [vol.volUUID for vol in chain]
     return self._chain
Example #28
    def getMetadata(self, metaId=None):
        """
        Get the metadata as an array of key=value lines.
        """
        if not metaId:
            metaId = self.getMetadataId()

        _, offs = metaId
        sd = sdCache.produce_manifest(self.sdUUID)
        try:
            lines = misc.readblock(sd.metadata_volume_path(),
                                   offs * sc.METADATA_SIZE, sc.METADATA_SIZE)
        except Exception as e:
            self.log.error(e, exc_info=True)
            raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))

        md = VolumeMetadata.from_lines(lines)
        return md.legacy_info()
Example #29
    def locks(self):
        # A shared lock is always required
        ret = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]

        # An exclusive lock will be taken if source and destination images
        # are not the same, otherwise there will be a deadlock.
        if self.lock_image:
            img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
            mode = rm.EXCLUSIVE if self._writable else rm.SHARED
            ret.append(rm.Lock(img_ns, self.img_id, mode))

        if self._writable:
            dom = sdCache.produce_manifest(self.sd_id)
            if dom.hasVolumeLeases():
                ret.append(
                    volume.VolumeLease(self._host_id, self.sd_id, self.img_id,
                                       self.vol_id))
        return ret
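To see why taking the image lock twice would deadlock, here is a minimal illustration with a plain non-reentrant lock from the standard library (not the vdsm rm API): the second acquire of the same exclusive lock from the same flow never returns.

    import threading

    image_lock = threading.Lock()

    image_lock.acquire()    # lock for the "source" image
    image_lock.acquire()    # "destination" lock on the same image: blocks forever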
Example #30
    def validateImagePath(self):
        """
        Block SD supports lazy image dir creation
        """
        manifest = sdCache.produce_manifest(self.sdUUID)
        imageDir = manifest.getImageDir(self.imgUUID)

        # Image directory may be a symlink to /run/vdsm/storage/sd/image
        # created when preparing an image before starting a vm.
        if os.path.islink(imageDir) and not os.path.exists(imageDir):
            self.log.warning("Removing stale image directory link %r",
                             imageDir)
            os.unlink(imageDir)

        if not os.path.isdir(imageDir):
            try:
                os.mkdir(imageDir, 0o755)
            except Exception:
                self.log.exception("Unexpected error")
                raise se.ImagePathError(imageDir)
        self._imagePath = imageDir
Example #31
    def validateImagePath(self):
        """
        Block SD supports lazy image dir creation
        """
        manifest = sdCache.produce_manifest(self.sdUUID)
        imageDir = manifest.getImageDir(self.imgUUID)

        # Image directory may be a symlink to /run/vdsm/storage/sd/image
        # created when preparing an image before starting a vm.
        if os.path.islink(imageDir) and not os.path.exists(imageDir):
            self.log.warning("Removing stale image directory link %r",
                             imageDir)
            os.unlink(imageDir)

        if not os.path.isdir(imageDir):
            self.log.info("Creating image directory %r", imageDir)
            try:
                os.mkdir(imageDir, 0o755)
            except Exception:
                self.log.exception("Unexpected error")
                raise se.ImagePathError(imageDir)
        self._imagePath = imageDir
Example #32
    def delete(self, postZero, force, discard):
        """ Delete volume
            'postZero' - zeroing file before deletion
            'force' is required to remove shared and internal volumes
            'discard' - discard lv before deletion
        """
        self.log.info("Request to delete LV %s of image %s in VG %s ",
                      self.volUUID, self.imgUUID, self.sdUUID)

        vol_path = self.getVolumePath()

        # On block storage domains we store a volume's parent UUID in two
        # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
        # to the volume LV itself.  The LV tag is more efficient to access
        # than the domain metadata but it may only be updated by the SPM.
        #
        # This means that after a live merge completes the domain metadata LV
        # will be updated but the LV tag will not.  We can detect this case
        # here and fix the LV tag since this is an SPM verb.
        #
        # File domains do not have this complexity because the metadata is
        # stored in only one place and that metadata is updated by the HSM
        # host when the live merge finishes.
        sync = False
        for childID in self.getChildren():
            child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                                childID)
            metaParent = child.getParentMeta()
            tagParent = child.getParentTag()
            if metaParent != tagParent:
                self.log.debug(
                    "Updating stale PUUID LV tag from %s to %s for "
                    "volume %s", tagParent, metaParent, child.volUUID)
                child.setParentTag(metaParent)
                sync = True
        if sync:
            self.recheckIfLeaf()

        if not force:
            self.validateDelete()

        # Mark volume as illegal before deleting
        self.setLegality(sc.ILLEGAL_VOL)

        if postZero or discard:
            self.prepare(justme=True,
                         rw=True,
                         chainrw=force,
                         setrw=True,
                         force=True)
            try:
                if postZero:
                    blockdev.zero(vol_path, task=vars.task)

                if discard:
                    blockdev.discard(vol_path)
            finally:
                self.teardown(self.sdUUID, self.volUUID, justme=True)

        # Try to clean up as much as possible.
        eFound = se.CannotDeleteVolume(self.volUUID)
        puuid = None
        try:
            # We need to blank the parent record in our metadata
            # so that the parent can become a leaf.
            puuid = self.getParent()
            self.setParent(sc.BLANK_UUID)
            if puuid and puuid != sc.BLANK_UUID:
                pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                                   puuid)
                pvol.recheckIfLeaf()
        except Exception as e:
            eFound = e
            self.log.warning("cannot finalize parent volume %s",
                             puuid,
                             exc_info=True)

        # Basically, we want to mark the volume _remove_me at the beginning of
        # the delete; however, with the current delete logic, if marking the
        # volume fails, and the deleted volume is a leaf, we end up with a
        # chain with a valid leaf volume.
        # The ultimate solution of volume deletion requires changes in
        # image.syncVolumeChain to disconnect the volume from the chain,
        # and probably there mark it as _remove_me.
        manifest = sdCache.produce_manifest(self.sdUUID)
        manifest.markForDelVols(self.sdUUID, self.imgUUID, [self.volUUID],
                                sc.REMOVED_IMAGE_PREFIX)

        try:
            lvm.removeLVs(self.sdUUID, (self.volUUID, ))
        except se.CannotRemoveLogicalVolume:
            self.log.exception(
                "Failed to delete volume %s/%s. The "
                "logical volume must be removed manually.", self.sdUUID,
                self.volUUID)

        try:
            self.log.info("Unlinking %s", vol_path)
            os.unlink(vol_path)
            return True
        except Exception as e:
            eFound = e
            self.log.error("cannot delete volume's %s/%s link path: %s",
                           self.sdUUID,
                           self.volUUID,
                           vol_path,
                           exc_info=True)

        raise eFound
Example #33
 def createVolumeMetadataRollback(cls, taskObj, sdUUID, slot):
     cls.log.info("Metadata rollback for sdUUID=%s slot=%s", sdUUID, slot)
     sd = sdCache.produce_manifest(sdUUID)
     sd.clear_metadata_block(slot)
Example #34
 def qcow2_compat(self):
     dom = sdCache.produce_manifest(self.sd_id)
     return dom.qcow2_compat()
Example #35
 def preallocation(self):
     dom = sdCache.produce_manifest(self.sd_id)
     if (dom.supportsSparseness and
             self.volume.getType() == sc.PREALLOCATED_VOL):
         return qemuimg.PREALLOCATION.FALLOC
     return None
Example #36
 def removeMetadata(self, metaId):
     """
     Just wipe meta.
     """
     _, slot = metaId
     sdCache.produce_manifest(self.sdUUID).clear_metadata_block(slot)
Example #37
 def recommends_unordered_writes(self):
     dom = sdCache.produce_manifest(self.sd_id)
     return dom.recommends_unordered_writes(self.volume.getFormat())
Example #38
File: merge.py Project: nirs/vdsm
 def top_vol(self):
     if self._top_vol is None:
         dom = sdCache.produce_manifest(self.sd_id)
         self._top_vol = dom.produceVolume(self.img_id,
                                           self.top_id)
     return self._top_vol
Example #39
    def delete(self, postZero, force, discard):
        """ Delete volume
            'postZero' - zeroing file before deletion
            'force' is required to remove shared and internal volumes
            'discard' - discard lv before deletion
        """
        self.log.info("Request to delete LV %s of image %s in VG %s ",
                      self.volUUID, self.imgUUID, self.sdUUID)

        vol_path = self.getVolumePath()
        slot = self.getMetaSlot()

        # On block storage domains we store a volume's parent UUID in two
        # places: 1) in the domain's metadata LV, and 2) in a LV tag attached
        # to the volume LV itself.  The LV tag is more efficient to access
        # than the domain metadata but it may only be updated by the SPM.
        #
        # This means that after a live merge completes the domain metadata LV
        # will be updated but the LV tag will not.  We can detect this case
        # here and fix the LV tag since this is an SPM verb.
        #
        # File domains do not have this complexity because the metadata is
        # stored in only one place and that metadata is updated by the HSM
        # host when the live merge finishes.
        sync = False
        for childID in self.getChildren():
            child = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                                childID)
            metaParent = child.getParentMeta()
            tagParent = child.getParentTag()
            if metaParent != tagParent:
                self.log.debug("Updating stale PUUID LV tag from %s to %s for "
                               "volume %s", tagParent, metaParent,
                               child.volUUID)
                child.setParentTag(metaParent)
                sync = True
        if sync:
            self.recheckIfLeaf()

        if not force:
            self.validateDelete()

        # Mark volume as illegal before deleting
        self.setLegality(sc.ILLEGAL_VOL)

        if postZero or discard:
            self.prepare(justme=True, rw=True, chainrw=force, setrw=True,
                         force=True)
            try:
                if postZero:
                    blockdev.zero(vol_path, task=vars.task)

                if discard:
                    blockdev.discard(vol_path)
            finally:
                self.teardown(self.sdUUID, self.volUUID, justme=True)

        # Try to clean up as much as possible.
        eFound = se.CannotDeleteVolume(self.volUUID)
        puuid = None
        try:
            # We need to blank the parent record in our metadata
            # so that the parent can become a leaf.
            puuid = self.getParent()
            self.setParent(sc.BLANK_UUID)
            if puuid and puuid != sc.BLANK_UUID:
                pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
                                   puuid)
                pvol.recheckIfLeaf()
        except Exception as e:
            eFound = e
            self.log.warning("cannot finalize parent volume %s", puuid,
                             exc_info=True)

        # Basically, we want to mark the volume _remove_me at the beginning of
        # the delete; however, with the current delete logic, if marking the
        # volume fails, and the deleted volume is a leaf, we end up with a
        # chain with a valid leaf volume.
        # The ultimate solution of volume deletion requires changes in
        # image.syncVolumeChain to disconnect the volume from the chain,
        # and probably there mark it as _remove_me.
        manifest = sdCache.produce_manifest(self.sdUUID)
        manifest.markForDelVols(self.sdUUID, self.imgUUID, [self.volUUID],
                                sc.REMOVED_IMAGE_PREFIX)

        try:
            lvm.removeLVs(self.sdUUID, self.volUUID)
        except se.CannotRemoveLogicalVolume:
            self.log.exception("Failed to delete volume %s/%s. The "
                               "logical volume must be removed manually.",
                               self.sdUUID, self.volUUID)
        else:
            # If removing the LV fails, we don't want to remove the
            # metadata. As the volume still exists on the storage, and is
            # accessible, removing the metadata will cause unexpected
            # errors when accessing the metadata that was wiped.  This is a
            # minimal solution for: https://bugzilla.redhat.com/1574631
            try:
                self.removeMetadata([self.sdUUID, slot])
            except se.VolumeMetadataWriteError as e:
                eFound = e
                self.log.exception("Failed to delete volume %s/%s metadata.",
                                   self.sdUUID, self.volUUID)

        try:
            self.log.info("Unlinking %s", vol_path)
            os.unlink(vol_path)
            return True
        except Exception as e:
            eFound = e
            self.log.error("cannot delete volume's %s/%s link path: %s",
                           self.sdUUID, self.volUUID, vol_path, exc_info=True)

        raise eFound