def test_chain_after_finalize(self, base_fmt):
    """After finalize, data written to base must be readable via the child."""
    with self.make_env(format=base_fmt, chain_len=3) as env:
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        child_vol = env.chain[2]

        # Write a known pattern into the base; after the qemu-rebase done
        # by finalize, the same pattern must be readable from the child,
        # proving the chain is still valid.
        offset = 0
        pattern = 0xf0
        length = 1024
        qemu_pattern_write(base_vol.volumePath,
                           sc.fmt2str(base_vol.getFormat()),
                           offset=offset, len=length, pattern=pattern)

        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        merge.finalize(subchain)

        qemu_pattern_verify(child_vol.volumePath,
                            sc.fmt2str(child_vol.getFormat()),
                            offset=offset, len=length, pattern=pattern)
def test_finalize(self, sd_type, chain_len, base_index, top_index):
    """Finalize a subchain; verify rebase of the child, chain sync and legality."""
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        merge.finalize(subchain)

        # If top has a child, the child must now be rebased on base.
        if top_vol is not env.chain[-1]:
            child_vol = env.chain[top_index + 1]
            info = qemuimg.info(child_vol.volumePath)
            self.assertEqual(
                info['backingfile'],
                volume.getBackingVolumePath(subchain.img_id,
                                            subchain.base_id))

        # verify syncVolumeChain arguments
        self.check_sync_volume_chain(subchain, env.chain[-1].volUUID)

        # The merged top volume must have been dropped from the chain.
        new_chain = [vol.volUUID for vol in env.chain]
        new_chain.remove(top_vol.volUUID)
        self.assertEqual(image.Image.syncVolumeChain.actual_chain,
                         new_chain)

        self.assertEqual(base_vol.getLegality(), sc.LEGAL_VOL)
def test_validate_top_is_not_in_chain(self):
    """validate() must reject a top volume that is not part of the chain."""
    with self.make_env() as env:
        base_vol = env.chain[0]
        # Use a random UUID as top_id, guaranteed not to be in the chain.
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': make_uuid(),
             'base_generation': 0},
            0)
        self.assertRaises(se.VolumeIsNotInChain, subchain.validate)
def test_validate_vol_is_not_base_parent(self):
    """validate() must reject a top whose parent is not the given base."""
    with self.make_env(chain_len=3) as env:
        base_vol = env.chain[0]
        # chain[2]'s parent is chain[1], not chain[0], so this pair is bogus.
        top_vol = env.chain[2]
        subchain = merge.SubchainInfo(
            {'sd_id': top_vol.sdUUID,
             'img_id': top_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        self.assertRaises(se.WrongParentVolume, subchain.validate)
def test_validate_vol_is_not_shared(self, shared_vol):
    """validate() must reject the subchain when a volume in it is shared."""
    with self.make_env(chain_len=3, shared=True) as env:
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        # Mark the parametrized volume (base or top) as shared.
        env.chain[shared_vol].setShared()
        subchain = merge.SubchainInfo(
            {'sd_id': top_vol.sdUUID,
             'img_id': top_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        self.assertRaises(se.SharedVolumeNonWritable, subchain.validate)
def test_legal_chain(self):
    """validate() passes without raising for a correct, legal subchain."""
    with self.make_env() as env:
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        # Next subchain.validate() should pass without exceptions.
        subchain.validate()
def make_env(env_type, base, top):
    """Yield a fake storage environment with a two-volume chain (base + top).

    The yielded env carries a prepared ``env.subchain`` and has the merge
    collaborators (resource manager, sdCache, getChain, extendSize)
    monkeypatched with fakes for the duration of the context.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    # A raw base on block storage is fully preallocated; any other
    # combination is sparse.
    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id,
                        vol_format=sc.COW_FORMAT)

        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB / MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB / MB)

        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (blockVolume, 'sdCache', env.sdcache),
            (image.Image, 'getChain',
             lambda self, sdUUID, imgUUID: [env.subchain.base_vol,
                                            env.subchain.top_vol]),
            (blockVolume.BlockVolume, 'extendSize',
             partial(fake_blockVolume_extendSize, env)),
            (fileVolume.FileVolume, 'extendSize',
             partial(fake_fileVolume_extendSize, env)),
        ]):
            yield env
def test_finalize_illegal_volume(self, volume):
    """finalize() must refuse to run when base or top is marked ILLEGAL."""
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        # Mark the parametrized volume illegal before finalizing.
        victim = base_vol if volume == 'base' else top_vol
        victim.setLegality(sc.ILLEGAL_VOL)
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        with self.assertRaises(se.prepareIllegalVolumeError):
            merge.finalize(subchain)
def test_qemuimg_rebase_failed(self):
    """A failed qemu-img rebase must leave top legal and still a child of base."""
    with self.make_env(sd_type='file', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        # Force every qemu-img invocation to fail by swapping the binary
        # for /usr/bin/false.
        with MonkeyPatchScope([
            (qemuimg._qemuimg, '_cmd', '/usr/bin/false'),
        ]):
            with self.assertRaises(cmdutils.Error):
                merge.finalize(subchain)
        # Rollback must have restored the pre-finalize state.
        self.assertEqual(subchain.top_vol.getLegality(), sc.LEGAL_VOL)
        self.assertEqual(subchain.top_vol.getParent(), base_vol.volUUID)
def test_merge_illegal_volume(self, volume):
    """The merge job must fail when base or top is marked ILLEGAL."""
    job_id = make_uuid()
    with self.make_env(sd_type='block', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        # Mark the parametrized volume illegal before running the job.
        victim = base_vol if volume == 'base' else top_vol
        victim.setLegality(sc.ILLEGAL_VOL)
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.prepareIllegalVolumeError)
def test_reduce_not_chunked(self):
    """finalize() must not reduce the base volume on file (non-chunked) storage."""
    with self.make_env(sd_type='file', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        merge.finalize(subchain)

        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        # An empty call record on the fake volume means that 'reduce'
        # was never invoked.
        calls = getattr(fake_base_vol, "__calls__", {})
        self.assertEqual(len(calls), 0)
def test_reduce_chunked(self):
    """On block (chunked) storage, finalize() must reduce base to its optimal size."""
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        merge.finalize(subchain)

        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        # Exactly one call was made on the fake volume: reduce() with
        # the optimal size expressed in blocks.
        self.assertEqual(len(fake_base_vol.__calls__), 1)
        optimal_size = base_vol.optimal_size() // sc.BLOCK_SIZE
        self.assertEqual(fake_base_vol.__calls__[0],
                         ('reduce', (optimal_size,), {}))
def test_merge_subchain(self, sd_type, chain_len, base_index, top_index):
    """Run the merge job and verify data, locks, legality and generation."""
    job_id = make_uuid()
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # Verify that the chain data was merged: each volume in the
        # subchain wrote its own pattern at its own offset.
        for i in range(base_index, top_index + 1):
            offset = i * 1024
            pattern = 0xf0 + i
            # We expect to read all data from top
            qemu_pattern_verify(top_vol.volumePath, qemuimg.FORMAT.QCOW2,
                                offset=offset, len=1024, pattern=pattern)
            # And base, since top was merged into base
            qemu_pattern_verify(base_vol.volumePath,
                                sc.fmt2str(base_vol.getFormat()),
                                offset=offset, len=1024, pattern=pattern)

        self.assertEqual(sorted(self.expected_locks(base_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(base_vol.getLegality(), sc.LEGAL_VOL)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 1)
def test_reduce_failure(self):
    """A reduce failure propagates, but the chain was already synced."""
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)

        # Program the fake base volume to fail its reduce() call.
        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        fake_base_vol.errors["reduce"] = se.LogicalVolumeExtendError(
            "vgname", "lvname", base_vol.optimal_size())

        with self.assertRaises(se.LogicalVolumeExtendError):
            merge.finalize(subchain)

        # verify syncVolumeChain arguments
        self.check_sync_volume_chain(subchain, env.chain[-1].volUUID)
def test_subchain_validation(self):
    """The merge job must call subchain.validate() before touching any data."""
    job_id = make_uuid()
    with self.make_env(sd_type='file', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_index = 0
        top_index = 1
        base_vol = env.chain[base_index]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=top_vol.sdUUID,
                             img_id=top_vol.imgUUID,
                             # Fixed: was base_vol.imgUUID - a copy-paste
                             # typo; base_id must be the base volume UUID,
                             # as in every other test in this file.
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def fail():
            raise se.VolumeIsNotInChain(None, None, None)

        # We already tested that subchain validate does the right thing,
        # here we test that this job care to call subchain validate.
        subchain.validate = fail
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.VolumeIsNotInChain)

        # Check that validate is called *before* attempting - verify that
        # the chain data was *not* merged
        offset = base_index * 1024
        pattern = 0xf0 + base_index
        qemu_pattern_verify(base_vol.volumePath, qemuimg.FORMAT.RAW,
                            offset=offset, len=1024, pattern=pattern)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_rollback_volume_legallity_failed(self):
    """If restoring top's legality fails during rollback, top stays ILLEGAL."""
    with self.make_env(sd_type='block', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain = merge.SubchainInfo(
            {'sd_id': base_vol.sdUUID,
             'img_id': base_vol.imgUUID,
             'base_id': base_vol.volUUID,
             'top_id': top_vol.volUUID,
             'base_generation': 0},
            0)

        def setLegality(self, legality):
            # Fail only when switching back to LEGAL, so the initial
            # transition to ILLEGAL during finalize still succeeds.
            if legality == sc.LEGAL_VOL:
                raise RuntimeError("Rollback volume legality failed")
            self.setMetaParam(sc.LEGALITY, legality)

        # /usr/bin/false makes the rebase fail, triggering rollback;
        # the patched setLegality then makes the rollback itself fail.
        with MonkeyPatchScope([
            (qemuimg._qemuimg, '_cmd', '/usr/bin/false'),
            (volume.VolumeManifest, 'setLegality', setLegality),
        ]):
            with self.assertRaises(cmdutils.Error):
                merge.finalize(subchain)

        self.assertEqual(subchain.top_vol.getLegality(), sc.ILLEGAL_VOL)