def test_calculate_initial_size_blk_file_raw_prealloc(
        self, storage, format, prealloc, estimate, expected):
    """Check the initial size (in blocks) computed for a new volume."""
    is_file_storage = (storage == "file")
    repo_image = image.Image("/path")
    actual_blk = repo_image.calculate_initial_size_blk(
        is_file_storage, format, prealloc, estimate)
    assert actual_blk == expected
def test_raw_to_qcow2_estimated_size(self, monkeypatch, sd_class):
    """RAW -> QCOW2 conversion estimate matches qemu-img measure output."""
    monkeypatch.setattr(image, "config", CONFIG)
    # Fake qemu-img measure result for a 1 GiB empty raw volume:
    #   qemu-img measure -f raw -O qcow2 test.raw
    #   required size: 393216
    #   fully allocated size: 1074135040
    monkeypatch.setattr(
        qemuimg, 'measure', lambda **args: {"required": 393216})
    monkeypatch.setattr(image, 'sdCache', FakeStorageDomainCache())
    image.sdCache.domains['sdUUID'] = sd_class("fake manifest")

    repo_image = image.Image("/path/to/repo")
    params = dict(
        capacity=GiB,
        volFormat=sc.RAW_FORMAT,
        path='path',
        block=sd_class.is_block())
    result = repo_image.estimate_qcow2_size(params, "sdUUID")
    assert result == 1074135040
def _update_vdsm_metadata(dom, subchain):
    """Sync image metadata after dropping subchain.top_id from the chain."""
    orig_top_id = subchain.chain[-1]
    new_chain = list(subchain.chain)
    new_chain.remove(subchain.top_id)
    log.info("Updating Vdsm metadata, syncing new chain: %s", new_chain)
    image_repo = image.Image(dom.getRepoPath())
    image_repo.syncVolumeChain(
        subchain.sd_id, subchain.img_id, orig_top_id, new_chain)
def chain(self):
    """Return the volume chain as a list of volume UUIDs, caching it."""
    if self._chain is not None:
        return self._chain

    dom = sdCache.produce_manifest(self.sd_id)
    image_repo = image.Image(dom.getRepoPath())
    volumes = image_repo.getChain(self.sd_id, self.img_id)

    # A VM cloned from a template has a shared template volume at the
    # root of its chain. Image.getChain does not return shared volumes,
    # so fetch it via the first volume's parent and prepend it manually.
    template = volumes[0].getParentVolume()
    if template is not None:
        if not template.isShared():
            raise se.UnexpectedVolumeState(
                template.volUUID, "Shared", "Not Shared")
        volumes.insert(0, template)

    self._chain = [vol.volUUID for vol in volumes]
    return self._chain
def test_qcow2_to_qcow2_estimated_size(self, monkeypatch, sd_class):
    """QCOW2 -> QCOW2 conversion estimate matches qemu-img measure output.

    Updated to the byte-based estimate_qcow2_size() API used by the
    sibling raw-to-qcow2 test: vol_params takes 'capacity' (not the
    legacy 'size') and a 'block' flag, and the expected value is in
    bytes (2097920 blocks * 512 == 1074135040 bytes).
    """
    monkeypatch.setattr(image, "config", CONFIG)
    monkeypatch.setattr(
        qemuimg, 'measure',
        # the estimated size for converting 1 gb
        # qcow2 empty volume to qcow2 format
        # cmd:
        # qemu-img measure -f qcow2 -O qcow2 test.qcow2
        # output:
        # required size: 393216
        # fully allocated size: 1074135040
        lambda **args: {"required": 393216})
    monkeypatch.setattr(image, 'sdCache', FakeStorageDomainCache())
    image.sdCache.domains['sdUUID'] = sd_class("fake manifest")
    img = image.Image("/path/to/repo")
    vol_params = dict(capacity=GiB,
                      volFormat=sc.COW_FORMAT,
                      path='path',
                      block=sd_class.is_block())
    estimated_size = img.estimate_qcow2_size(vol_params, "sdUUID")
    assert estimated_size == 1074135040
def test_calculate_vol_alloc(
        self, src_params, dest_format, expected_blk):
    """Check the allocation (in blocks) computed for a copied volume."""
    img = image.Image("/path/to/repo")
    alloc_blk = img.calculate_vol_alloc(
        "src_sd_id", src_params, "dst_sd_id", dest_format)
    # Plain assert for consistency with the other pytest-style tests in
    # this file (was unittest's self.assertEqual).
    assert alloc_blk == expected_blk
def __getResourceCandidatesList(self, resourceName, lockType):
    """
    Return list of lock candidates (template and volumes)

    Resolves the volume chain of image *resourceName* in this domain,
    activates its LVs on block storage, and acquires a resource lock for
    the template (if any) and for every volume in the chain.  On any
    acquisition failure, all locks acquired so far are released before
    the exception propagates.  Returns the list of acquired resources
    (empty if the image does not exist in this domain).
    """
    # Must be imported here due to import cycles.
    # TODO: Move getChain to another module to we can use normal import.
    import vdsm.storage.image as image
    volResourcesList = []
    template = None
    dom = sdCache.produce(sdUUID=self.sdUUID)
    # Get the list of the volumes
    repoPath = os.path.join(sc.REPO_DATA_CENTER, dom.getPools()[0])
    try:
        chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                               imgUUID=resourceName)
    except se.ImageDoesNotExistInSD:
        # Nothing to lock for a missing image; callers get an empty list.
        log.debug("Image %s does not exist in domain %s",
                  resourceName, self.sdUUID)
        return []

    # check if the chain is build above a template, or it is a standalone
    pvol = chain[0].getParentVolume()
    if pvol:
        template = pvol.volUUID
    elif chain[0].isShared():
        # Image of template itself,
        # with no other volumes in chain
        template = chain[0].volUUID
        # Clear the chain so only the template lock is taken below.
        del chain[:]

    volUUIDChain = [vol.volUUID for vol in chain]
    # Sorted order gives a deterministic lock-acquisition order.
    volUUIDChain.sort()

    # Activate all volumes in chain at once.
    # We will attempt to activate all volumes again down to the flow with
    # no consequence, since they are already active.
    # TODO Fix resource framework to hold images, instead of specific vols.
    # This assumes that chains can not spread into more than one SD.
    if dom.__class__.__name__ == "BlockStorageDomain":
        lvm.activateLVs(self.sdUUID, volUUIDChain)

    failed = False
    # Acquire template locks:
    # - 'lockType' for template's image itself
    # - Always 'shared' lock for image based on template
    try:
        if template:
            if len(volUUIDChain) > 0:
                # Image is based on the template: the template may be
                # shared by other images, so only a shared lock is taken.
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace,
                    template, rm.SHARED,
                    timeout=self.resource_default_timeout)
            else:
                # The image IS the template itself: honor the requested
                # lock type.
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace,
                    template, lockType,
                    timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)

        # Acquire 'lockType' volume locks
        for volUUID in volUUIDChain:
            volRes = rm.acquireResource(
                self.volumeResourcesNamespace,
                volUUID, lockType,
                timeout=self.resource_default_timeout)

            volResourcesList.append(volRes)
    except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
        log.debug("Cannot acquire volume resource (%s)", str(e))
        failed = True
        raise
    except Exception:
        log.debug("Cannot acquire volume resource", exc_info=True)
        failed = True
        raise
    finally:
        if failed:
            # Release already acquired template/volumes locks
            for volRes in volResourcesList:
                volRes.release()

    return volResourcesList