def chain(self):
    """
    Return the list of volume UUIDs making up this image's chain.

    The chain is resolved from the storage domain on first access and
    cached on the instance for later calls.
    """
    if self._chain is None:
        dom = sdCache.produce_manifest(self.sd_id)
        img_repo = image.Image(dom.getRepoPath())
        volumes = img_repo.getChain(self.sd_id, self.img_id)
        self._chain = [v.volUUID for v in volumes]
    return self._chain
def prepare(self, writable=False):
    """
    Context-manager body: prepare this volume, optionally for writing,
    yield to the caller, and always tear the volume down afterwards.
    """
    vol = sdCache.produce_manifest(self.sd_id).produceVolume(
        self.img_id, self.vol_id)
    self._vol = vol
    vol.prepare(rw=writable, justme=True)
    try:
        yield
    finally:
        vol.teardown(self.sd_id, self.vol_id, justme=True)
def locks(self):
    """
    Return the locks required for this operation: a shared storage
    domain lock, an exclusive image lock, and — when the domain supports
    volume leases — a lease on the volume itself.
    """
    img_ns = sd.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    required = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE),
    ]
    dom = sdCache.produce_manifest(self.sd_id)
    if dom.hasVolumeLeases():
        required.append(volume.VolumeLease(
            self._host_id, self.sd_id, self.img_id, self.vol_id))
    return required
def _validate(self):
    """
    Validate preconditions for amending this volume.

    The volume must be in COW format, must not be shared, and the
    storage domain must support the requested qcow2 compat level.

    Raises:
        Error: if any of the preconditions does not hold.
    """
    if self._vol_info.volume.getFormat() != sc.COW_FORMAT:
        raise Error(self._vol_info.vol_id, "volume is not COW format")
    if self._vol_info.volume.isShared():
        raise Error(self._vol_info.vol_id, "volume is shared")
    # Named "dom" (as in the sibling methods) instead of "sd", which
    # would shadow the storage domain module used elsewhere in the file.
    dom = sdCache.produce_manifest(self._vol_info.sd_id)
    if not dom.supports_qcow2_compat(self._qcow2_attr.compat):
        raise Error(self._vol_info.vol_id,
                    "storage domain %s does not support compat %s" %
                    (self._vol_info.sd_id, self._qcow2_attr.compat))
def locks(self):
    """
    Return the locks needed for the merge: a shared storage domain lock
    and an exclusive image lock, plus the base volume lease when the
    domain has volume leases.
    """
    img_ns = sd.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    needed = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE),
    ]
    dom = sdCache.produce_manifest(self.sd_id)
    if dom.hasVolumeLeases():
        # We take only the base lease since no other volumes are modified
        needed.append(volume.VolumeLease(
            self.host_id, self.sd_id, self.img_id, self.base_id))
    return needed
def locks(self):
    """
    Return the locks for this job: a shared storage domain lock, an
    exclusive image lock, and a volume lease when the domain supports
    volume leases.
    """
    img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    acquired = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE),
    ]
    if sdCache.produce_manifest(self.sd_id).hasVolumeLeases():
        acquired.append(volume.VolumeLease(
            self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def _run(self):
    """
    Move the data from the source PV to the destination PVs on a
    storage domain that supports device reduce.

    Raises:
        se.StorageDomainVersionError: if the domain version does not
            support the operation.
    """
    manifest = sdCache.produce_manifest(self.params.sd_id)
    if not manifest.supports_device_reduce():
        raise se.StorageDomainVersionError(
            "move device not supported for domain version %s"
            % manifest.getVersion())
    # TODO: we assume at this point that the domain isn't active and can't
    # be activated - we need to ensure that.
    with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
        with manifest.domain_id(self.host_id):
            with manifest.domain_lock(self.host_id):
                manifest.movePV(self.params.src_guid, self.params.dst_guids)
def prepare(self):
    """
    Context-manager body: prepare every volume in the chain from the
    base up to and including the top volume, yield to the caller, then
    tear down the prepared part of the chain from the top.
    """
    top_index = self.chain.index(self.top_id)
    chain_to_prepare = self.chain[:top_index + 1]
    dom = sdCache.produce_manifest(self.sd_id)
    for vol_id in chain_to_prepare:
        # Only the base volume is prepared with force.
        force = vol_id == self.base_id
        vol = dom.produceVolume(self.img_id, vol_id)
        # TODO: to improve this late to use subchain.top_vol
        # subchain.base_vol.
        vol.prepare(justme=True, force=force)
    try:
        yield
    finally:
        self.top_vol.teardown(self.sd_id, self.top_id)
def _extend_base_allocation(base_vol, top_vol): if not (base_vol.is_block() and base_vol.getFormat() == sc.COW_FORMAT): return base_alloc = base_vol.getVolumeSize(bs=1) top_alloc = top_vol.getVolumeSize(bs=1) vol_chunk_size = (config.getint('irs', 'volume_utilization_chunk_mb') * constants.MEGAB) potential_alloc = base_alloc + top_alloc + vol_chunk_size # TODO: add chunk_size only if top is leaf. capacity = base_vol.getSize() * sc.BLOCK_SIZE max_alloc = utils.round(capacity * sc.COW_OVERHEAD, constants.MEGAB) actual_alloc = min(potential_alloc, max_alloc) actual_alloc_mb = (actual_alloc + constants.MEGAB - 1) / constants.MEGAB dom = sdCache.produce_manifest(base_vol.sdUUID) dom.extendVolume(base_vol.volUUID, actual_alloc_mb)
def finalize(subchain):
    """
    During finalize we distinguish between leaf merge and internal merge.

    In case of leaf merge, we only update vdsm metadata, i.e. we call
    syncVolumeChain that marks the top volume as ILLEGAL. If the operation
    succeeds, the top volume is marked as ILLEGAL and will be removed by
    the engine. In case of failure, if the top volume is LEGAL, the user
    can recover by retrying cold merge. If the top volume is ILLEGAL, and
    the engine fails to delete the volume, a manual recovery is required.

    In case of internal merge, we need to update qcow metadata and vdsm
    metadata. For qcow metadata, we rebase top's child on base, and for
    vdsm metadata, we invoke syncVolumeChain that changes the child of the
    top to point to the base as its parent.

    As we would like to minimize the window where the top volume is
    ILLEGAL, we set it to ILLEGAL just before calling qemuimg rebase.

    After finalize internal merge, there are three possible states:
    1. top volume illegal, qemu and vdsm chains updated. The operation
       will be finished by the engine deleting the top volume.
    2. top volume is ILLEGAL but not rebased, both qemu chain and vdsm
       chain are synchronized. Manual recovery is possible by inspecting
       the chains and setting the top volume to legal.
    3. top volume is ILLEGAL, qemu chain rebased, but vdsm chain wasn't
       modified or partly modified. Manual recovery is possible by
       updating vdsm chain.
    """
    log.info("Finalizing subchain after merge: %s", subchain)
    with guarded.context(subchain.locks):
        # TODO: As each cold merge step - prepare, merge and finalize -
        # requires different volumes to be prepared, we will add a prepare
        # helper for each step.
        with subchain.prepare():
            subchain.validate()
            dom = sdCache.produce_manifest(subchain.sd_id)
            if subchain.top_vol.isLeaf():
                _finalize_leaf_merge(dom, subchain)
            else:
                _finalize_internal_merge(dom, subchain)

            if subchain.base_vol.chunked():
                # optimal_size must be called when the volume is prepared
                optimal_size = subchain.base_vol.optimal_size()

        # NOTE(review): the shrink is placed after the prepare() context
        # exits (volumes torn down), which is why chunked() is checked a
        # second time — confirm against the original indentation.
        if subchain.base_vol.chunked():
            _shrink_base_volume(subchain, optimal_size)
def finalize(subchain):
    """
    Finalize the merge: verify the base volume is ILLEGAL, update qemu
    and vdsm metadata, and restore the base volume legality.

    Raises:
        se.UnexpectedVolumeState: if the base volume is LEGAL.
    """
    log.info("Finalizing subchain after merge: %s", subchain)
    with guarded.context(subchain.locks):
        with subchain.prepare():
            subchain.validate()
            # Base volume must be ILLEGAL. Otherwise, VM could be run while
            # performing cold merge.
            legality = subchain.base_vol.getLegality()
            if legality == sc.LEGAL_VOL:
                raise se.UnexpectedVolumeState(
                    subchain.base_id, sc.ILLEGAL_VOL, legality)

            dom = sdCache.produce_manifest(subchain.sd_id)
            _update_qemu_metadata(dom, subchain)
            _update_vdsm_metadata(dom, subchain)
            subchain.base_vol.setLegality(sc.LEGAL_VOL)
def chain(self):
    """
    Return the list of volume UUIDs in this image's chain, including a
    shared template root when present, caching the result on first use.
    """
    if self._chain is None:
        dom = sdCache.produce_manifest(self.sd_id)
        img_repo = image.Image(dom.getRepoPath())
        volumes = img_repo.getChain(self.sd_id, self.img_id)
        # When the VM is cloned from a template, the root volume of the
        # volumes chain is a shared volume. Shared volumes are not returned
        # in the volumes list when calling Image.getChain hence, we have to
        # add that volume manually.
        template = volumes[0].getParentVolume()
        if template is not None:
            if not template.isShared():
                raise se.UnexpectedVolumeState(
                    template.volUUID, "Shared", "Not Shared")
            volumes.insert(0, template)
        self._chain = [v.volUUID for v in volumes]
    return self._chain
def volume(self):
    """
    Lazily resolve the volume object for this sd/img/vol id triple,
    caching it on the instance.
    """
    if self._vol is None:
        manifest = sdCache.produce_manifest(self.sd_id)
        self._vol = manifest.produceVolume(self.img_id, self.vol_id)
    return self._vol
def qcow2_compat(self):
    """
    Return the qcow2 compat level supported by this volume's storage
    domain.
    """
    return sdCache.produce_manifest(self.sd_id).qcow2_compat()