def expected_locks(src_vol, dst_vol):
    """Return the list of locks a copy from src_vol to dst_vol must take."""
    # Shared domain lock for each volume.
    locks = [
        rm.Lock(sc.STORAGE, src_vol.sdUUID, rm.SHARED),
        rm.Lock(sc.STORAGE, dst_vol.sdUUID, rm.SHARED),
    ]
    # Image lock for each volume, exclusive for the destination.
    locks.append(rm.Lock(rm.getNamespace(sc.IMAGE_NAMESPACE, src_vol.sdUUID),
                         src_vol.imgUUID, rm.SHARED))
    locks.append(rm.Lock(rm.getNamespace(sc.IMAGE_NAMESPACE, dst_vol.sdUUID),
                         dst_vol.imgUUID, rm.EXCLUSIVE))
    # Volume lease for the destination volume.
    locks.append(volume.VolumeLease(
        0, dst_vol.sdUUID, dst_vol.imgUUID, dst_vol.volUUID))
    return locks
def expected_locks(self, src_vol, dst_vol):
    """Return the list of locks a copy from src_vol to dst_vol must take."""
    src_img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, src_vol.sdUUID)
    dst_img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, dst_vol.sdUUID)
    # Shared domain lock for each volume.
    domain_locks = [
        rm.ResourceManagerLock(sc.STORAGE, src_vol.sdUUID, rm.SHARED),
        rm.ResourceManagerLock(sc.STORAGE, dst_vol.sdUUID, rm.SHARED),
    ]
    # Image lock for each volume, exclusive for the destination.
    image_locks = [
        rm.ResourceManagerLock(src_img_ns, src_vol.imgUUID, rm.SHARED),
        rm.ResourceManagerLock(dst_img_ns, dst_vol.imgUUID, rm.EXCLUSIVE),
    ]
    # Volume lease for the destination volume.
    lease = volume.VolumeLease(
        0, dst_vol.sdUUID, dst_vol.imgUUID, dst_vol.volUUID)
    return domain_locks + image_locks + [lease]
def expected_locks(self, subchain):
    """Return the locks expected while operating on subchain."""
    img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, subchain.sd_id)
    domain_lock = rm.ResourceManagerLock(
        sc.STORAGE, subchain.sd_id, rm.SHARED)
    image_lock = rm.ResourceManagerLock(
        img_ns, subchain.img_id, rm.EXCLUSIVE)
    base_lease = volume.VolumeLease(
        subchain.host_id, subchain.sd_id, subchain.img_id, subchain.base_id)
    return [domain_lock, image_lock, base_lease]
def expected_locks(self, subchain):
    """Return the locks expected while operating on subchain."""
    img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, subchain.sd_id)
    domain_lock = rm.ResourceManagerLock(
        sc.STORAGE, subchain.sd_id, rm.SHARED)
    image_lock = rm.ResourceManagerLock(
        img_ns, subchain.img_id, rm.EXCLUSIVE)
    base_lease = volume.VolumeLease(
        subchain.host_id, subchain.sd_id, subchain.img_id, subchain.base_id)
    return [domain_lock, image_lock, base_lease]
def locks(self):
    """
    Return the locks needed by this operation: a shared domain lock,
    an exclusive image lock, and the volume lease when the domain
    supports volume leases.
    """
    image_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    acquired = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(image_ns, self.img_id, rm.EXCLUSIVE),
    ]
    if sdCache.produce_manifest(self.sd_id).hasVolumeLeases():
        acquired.append(volume.VolumeLease(
            self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def locks(self):
    """
    Return the locks needed by this operation: a shared domain lock,
    an exclusive image lock, and the base volume lease when the domain
    supports volume leases.
    """
    image_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    acquired = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(image_ns, self.img_id, rm.EXCLUSIVE),
    ]
    if sdCache.produce_manifest(self.sd_id).hasVolumeLeases():
        # We take only the base lease since no other volumes are modified
        acquired.append(volume.VolumeLease(
            self.host_id, self.sd_id, self.img_id, self.base_id))
    return acquired
def locks(self):
    """
    Return the locks needed by this operation. The domain lock is always
    shared; the image lock is exclusive only for writable access, in
    which case the volume lease is also taken when the domain supports
    volume leases.
    """
    image_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
    acquired = [
        rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.ResourceManagerLock(image_ns, self.img_id, image_mode),
    ]
    if self._writable:
        dom = sdCache.produce_manifest(self.sd_id)
        if dom.hasVolumeLeases():
            acquired.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def locks(self):
    """
    Return the locks needed by this operation. The domain lock is always
    shared; the image lock is exclusive only for writable access, in
    which case the volume lease is also taken when the domain supports
    volume leases.
    """
    image_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
    acquired = [
        rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.Lock(image_ns, self.img_id, image_mode),
    ]
    if self._writable:
        dom = sdCache.produce_manifest(self.sd_id)
        if dom.hasVolumeLeases():
            acquired.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def expected_locks(self, base_vol):
    """Return the locks expected while operating on base_vol."""
    img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, base_vol.sdUUID)
    return [
        # Domain lock
        rm.ResourceManagerLock(sc.STORAGE, base_vol.sdUUID, rm.SHARED),
        # Image lock
        rm.ResourceManagerLock(img_ns, base_vol.imgUUID, rm.EXCLUSIVE),
        # Volume lease
        volume.VolumeLease(
            0, base_vol.sdUUID, base_vol.imgUUID, base_vol.volUUID),
    ]
def _registerResourceNamespaces(self):
    """
    Register resources namespaces and create factories for it.

    Registers the image namespace (backed by an ImageResourceFactory
    bound to this domain) and the volume namespace (backed by a
    SimpleResourceFactory).  Re-registration is harmless: an already
    registered namespace is only logged, never treated as an error.
    """
    registrations = [
        # Image resource namespace
        (rm.getNamespace(sc.IMAGE_NAMESPACE, self.sdUUID),
         resourceFactories.ImageResourceFactory(self.sdUUID)),
        # Volume resource namespace
        (rm.getNamespace(sc.VOLUME_NAMESPACE, self.sdUUID),
         rm.SimpleResourceFactory()),
    ]
    for namespace, factory in registrations:
        try:
            rm.registerNamespace(namespace, factory)
        except rm.NamespaceRegistered:
            self.log.debug("Resource namespace %s already registered",
                           namespace)
def _registerResourceNamespaces(self):
    """
    Register resources namespaces and create factories for it.

    Registers the image namespace (backed by an ImageResourceFactory
    bound to this domain) and the volume namespace (backed by a
    SimpleResourceFactory).  Re-registration is harmless: an already
    registered namespace is only logged, never treated as an error.
    """
    registrations = [
        # Image resource namespace
        (rm.getNamespace(sc.IMAGE_NAMESPACE, self.sdUUID),
         resourceFactories.ImageResourceFactory(self.sdUUID)),
        # Volume resource namespace
        (rm.getNamespace(sc.VOLUME_NAMESPACE, self.sdUUID),
         rm.SimpleResourceFactory()),
    ]
    for namespace, factory in registrations:
        try:
            rm.registerNamespace(namespace, factory)
        except rm.NamespaceRegistered:
            self.log.debug("Resource namespace %s already registered",
                           namespace)
def _run(self):
    """
    Create the volume artifacts while holding the domain lock and an
    exclusive image resource lock, then commit them.
    """
    info = self.vol_info
    vol_format = sc.name2type(info.vol_format)
    with self.sd_manifest.domain_lock(self.host_id):
        # The image resource is taken only after the domain lock is held.
        img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE,
                                 self.sd_manifest.sdUUID)
        with rm.acquireResource(img_ns, info.img_id, rm.EXCLUSIVE):
            artifacts = self.sd_manifest.get_volume_artifacts(
                info.img_id, info.vol_id)
            artifacts.create(
                info.virtual_size,
                vol_format,
                info.disk_type,
                info.description,
                info.parent,
                info.initial_size)
            artifacts.commit()
def locks(self):
    """
    Return the locks needed by this operation: always a shared domain
    lock; the image lock only when lock_image is set; and the volume
    lease only for writable access on domains with volume leases.
    """
    # A shared lock is always required
    acquired = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]
    # An exclusive lock will be taken if source and destination images
    # are not the same, otherwise there will be a deadlock.
    if self.lock_image:
        img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
        acquired.append(rm.Lock(img_ns, self.img_id, image_mode))
    if self._writable:
        dom = sdCache.produce_manifest(self.sd_id)
        if dom.hasVolumeLeases():
            acquired.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def test_create_volume(self):
    """
    Run a create_volume job to completion inside the fake environment
    and verify both the job outcome and the lock/resource bookkeeping.
    """
    args = self._get_args()
    job = create_volume.Job(**args)
    with self._fake_env():
        job.run()
        wait_for_job(job)
        # The job must finish successfully with no reported progress
        # and no error in its info dict.
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertIsNone(job.progress)
        self.assertNotIn('error', job.info())
        # Verify that the domain lock was acquired and released
        self.assertEqual([('acquireDomainLock', (1, ), {}),
                          ('releaseDomainLock', (), {})],
                         args['sd_manifest'].__calls__)
        # Verify that the image resource was locked and released
        image_ns = rm.getNamespace(sc.IMAGE_NAMESPACE,
                                   job.sd_manifest.sdUUID)
        rm_args = (image_ns, job.vol_info.img_id, rm.EXCLUSIVE)
        self.assertEqual([('acquireResource', rm_args, {}),
                          ('releaseResource', rm_args, {})],
                         self.rm.__calls__)
def teardown(cls, sdUUID, volUUID, justme=False):
    """
    Deactivate volume and release resources.
    Volume deactivation occurs as part of resource releasing.
    If justme is false, the entire COW chain should be torn down.
    """
    # Lazy %-style logging args instead of eager string formatting.
    cls.log.info("Tearing down volume %s/%s justme %s",
                 sdUUID, volUUID, justme)
    lvmActivationNamespace = rm.getNamespace(sc.LVM_ACTIVATION_NAMESPACE,
                                             sdUUID)
    rm.releaseResource(lvmActivationNamespace, volUUID)
    if not justme:
        try:
            pvolUUID = getVolumeTag(sdUUID, volUUID, sc.TAG_PREFIX_PARENT)
        except Exception as e:
            # Best effort: if storage is not accessible or an lvm error
            # occurred we fail to get the parent volume.  We can live
            # with it and still succeed in the volume's teardown.
            pvolUUID = sc.BLANK_UUID
            # Logger.warn is a deprecated alias of Logger.warning.
            cls.log.warning("Failure to get parent of volume %s/%s (%s)",
                            sdUUID, volUUID, e)
        if pvolUUID != sc.BLANK_UUID:
            # Recursively tear down the rest of the chain.
            cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
def teardown(cls, sdUUID, volUUID, justme=False):
    """
    Deactivate volume and release resources.
    Volume deactivation occurs as part of resource releasing.
    If justme is false, the entire COW chain should be torn down.
    """
    # Lazy %-style logging args instead of eager string formatting.
    cls.log.info("Tearing down volume %s/%s justme %s",
                 sdUUID, volUUID, justme)
    lvmActivationNamespace = rm.getNamespace(sc.LVM_ACTIVATION_NAMESPACE,
                                             sdUUID)
    rm.releaseResource(lvmActivationNamespace, volUUID)
    if not justme:
        try:
            pvolUUID = getVolumeTag(sdUUID, volUUID, sc.TAG_PREFIX_PARENT)
        except Exception as e:
            # Best effort: if storage is not accessible or an lvm error
            # occurred we fail to get the parent volume.  We can live
            # with it and still succeed in the volume's teardown.
            pvolUUID = sc.BLANK_UUID
            # Logger.warn is a deprecated alias of Logger.warning.
            cls.log.warning("Failure to get parent of volume %s/%s (%s)",
                            sdUUID, volUUID, e)
        if pvolUUID != sc.BLANK_UUID:
            # Recursively tear down the rest of the chain.
            cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
def __init__(self, repoPath, sdUUID, imgUUID, volUUID):
    """
    Initialize the base volume manifest, then cache the lvm activation
    namespace for this storage domain.
    """
    volume.VolumeManifest.__init__(
        self, repoPath, sdUUID, imgUUID, volUUID)
    namespace = rm.getNamespace(
        sc.LVM_ACTIVATION_NAMESPACE, self.sdUUID)
    self.lvmActivationNamespace = namespace
def __init__(self, sdUUID):
    """
    Initialize the base factory and cache the volume resource namespace
    for the given storage domain.
    """
    rm.SimpleResourceFactory.__init__(self)
    self.sdUUID = sdUUID
    namespace = rm.getNamespace(sc.VOLUME_NAMESPACE, self.sdUUID)
    self.volumeResourcesNamespace = namespace
def _createTargetImage(self, destDom, srcSdUUID, imgUUID):
    """
    Prepare the destination image before the actual data copy: resolve
    the source chain, create all destination volumes, and create a fake
    template on backup domains when needed.

    Returns a dict with 'srcChain' and 'dstChain' volume lists.
    Raises se.SourceImageActionError / se.DestImageActionError for
    unexpected failures on the corresponding side.
    """
    # Before actual data copying we need perform several operation
    # such as: create all volumes, create fake template if needed, ...
    try:
        # Find all volumes of source image
        srcChain = self.getChain(srcSdUUID, imgUUID)
        log_str = logutils.volume_chain_to_str(
            vol.volUUID for vol in srcChain)
        self.log.info("Source chain=%s ", log_str)
    except se.StorageException:
        # Storage errors are re-raised as-is.
        self.log.error("Unexpected error", exc_info=True)
        raise
    except Exception as e:
        # Anything else is translated to a source-side action error.
        self.log.error("Unexpected error", exc_info=True)
        raise se.SourceImageActionError(imgUUID, srcSdUUID, str(e))

    fakeTemplate = False
    pimg = sc.BLANK_UUID    # standalone chain
    # check if the chain is build above a template, or it is a standalone
    pvol = srcChain[0].getParentVolume()
    if pvol:
        # find out parent volume parameters
        volParams = pvol.getVolumeParams()
        pimg = volParams['imgUUID']      # pimg == template image
        if destDom.isBackup():
            # FIXME: This workaround help as copy VM to the backup domain
            #        without its template. We will create fake template
            #        for future VM creation and mark it as FAKE volume.
            #        This situation is relevant for backup domain only.
            fakeTemplate = True

    @contextmanager
    def justLogIt(img):
        # No-op context manager used when there is no template to lock.
        self.log.debug("You don't really need lock parent of image %s",
                       img)
        yield

    dstImageResourcesNamespace = rm.getNamespace(sc.IMAGE_NAMESPACE,
                                                 destDom.sdUUID)
    # In destination domain we need to lock image's template if exists
    with rm.acquireResource(dstImageResourcesNamespace, pimg, rm.SHARED) \
            if pimg != sc.BLANK_UUID else justLogIt(imgUUID):
        if fakeTemplate:
            self.createFakeTemplate(destDom.sdUUID, volParams)

        dstChain = []
        for srcVol in srcChain:
            # Create the dst volume
            try:
                # find out src volume parameters
                volParams = srcVol.getVolumeParams()
                # To avoid prezeroing preallocated volumes on NFS domains
                # we create the target as a sparse volume (since it will
                # be soon filled with the data coming from the copy) and
                # then we change its metadata back to the original value.
                if (destDom.supportsSparseness or
                        volParams['volFormat'] != sc.RAW_FORMAT):
                    tmpVolPreallocation = sc.SPARSE_VOL
                else:
                    tmpVolPreallocation = sc.PREALLOCATED_VOL
                destDom.createVolume(imgUUID=imgUUID,
                                     capacity=volParams['capacity'],
                                     volFormat=volParams['volFormat'],
                                     preallocate=tmpVolPreallocation,
                                     diskType=volParams['disktype'],
                                     volUUID=srcVol.volUUID,
                                     desc=volParams['descr'],
                                     srcImgUUID=pimg,
                                     srcVolUUID=volParams['parent'])
                dstVol = destDom.produceVolume(imgUUID=imgUUID,
                                               volUUID=srcVol.volUUID)
                # Extend volume (for LV only) size to the actual size
                dstVol.extend(volParams['apparentsize'])
                # Change destination volume metadata to preallocated in
                # case we've used a sparse volume to accelerate the
                # volume creation
                if volParams['prealloc'] == sc.PREALLOCATED_VOL \
                        and tmpVolPreallocation != sc.PREALLOCATED_VOL:
                    dstVol.setType(sc.PREALLOCATED_VOL)
                dstChain.append(dstVol)
            except se.StorageException:
                self.log.error("Unexpected error", exc_info=True)
                raise
            except Exception as e:
                # Translated to a destination-side action error.
                self.log.error("Unexpected error", exc_info=True)
                raise se.DestImageActionError(imgUUID, destDom.sdUUID,
                                              str(e))
            # only base may have a different parent image
            pimg = imgUUID
    return {'srcChain': srcChain, 'dstChain': dstChain}
def test_properties(self):
    """VolumeLease exposes the expected namespace, name and mode."""
    lease = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
    expected_ns = rm.getNamespace(sc.VOLUME_LEASE_NAMESPACE, 'dom')
    self.assertEqual(expected_ns, lease.ns)
    self.assertEqual('vol', lease.name)
    self.assertEqual(rm.EXCLUSIVE, lease.mode)
def test_properties(self):
    """VolumeLease exposes the expected namespace, name and mode."""
    lease = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
    expected_ns = rm.getNamespace(sc.VOLUME_LEASE_NAMESPACE, 'dom')
    assert lease.ns == expected_ns
    assert lease.name == 'vol'
    assert lease.mode == rm.EXCLUSIVE
def test_properties(self):
    """VolumeLease exposes the expected namespace, name and mode."""
    lease = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
    expected_ns = rm.getNamespace(sc.VOLUME_LEASE_NAMESPACE, 'dom')
    assert lease.ns == expected_ns
    assert lease.name == 'vol'
    assert lease.mode == rm.EXCLUSIVE
def __init__(self, sdUUID):
    """
    Initialize the base factory and cache the volume resource namespace
    for the given storage domain.
    """
    rm.SimpleResourceFactory.__init__(self)
    self.sdUUID = sdUUID
    namespace = rm.getNamespace(sc.VOLUME_NAMESPACE, self.sdUUID)
    self.volumeResourcesNamespace = namespace
def __init__(self, repoPath, sdUUID, imgUUID, volUUID):
    """
    Initialize the base volume manifest, then cache the lvm activation
    namespace for this storage domain.
    """
    volume.VolumeManifest.__init__(
        self, repoPath, sdUUID, imgUUID, volUUID)
    namespace = rm.getNamespace(
        sc.LVM_ACTIVATION_NAMESPACE, self.sdUUID)
    self.lvmActivationNamespace = namespace