def test_finalize(self, sd_type, chain_len, base_index, top_index):
    """Verify merge.finalize(): top's child (if any) is rebased onto
    base, the volume chain is synced without top, and base is left
    LEGAL."""
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        merge.finalize(subchain)

        # If top has a child, the child must now be rebased on base.
        if top_vol is not env.chain[-1]:
            child_vol = env.chain[top_index + 1]
            info = qemuimg.info(child_vol.volumePath)
            backing_file = volume.getBackingVolumePath(
                subchain.img_id, subchain.base_id)
            assert info['backingfile'] == backing_file

        # verify syncVolumeChain arguments
        self.check_sync_volume_chain(subchain, env.chain[-1].volUUID)

        # The synced chain must be the original chain minus top.
        new_chain = [vol.volUUID for vol in env.chain]
        new_chain.remove(top_vol.volUUID)
        assert image.Image.syncVolumeChain.actual_chain == new_chain

        assert base_vol.getLegality() == sc.LEGAL_VOL
def test_subchain_validation(self):
    """A merge job must call subchain.validate() and fail *before*
    touching any data when validation raises."""
    job_id = make_uuid()
    with self.make_env(sd_type='file', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_index = 0
        top_index = 1
        base_vol = env.chain[base_index]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=top_vol.sdUUID,
                             img_id=top_vol.imgUUID,
                             # Fixed: base_id must be the base volume's
                             # volUUID, not its imgUUID (consistent with
                             # every other subchain built in this file).
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def fail():
            raise se.VolumeIsNotInChain(None, None, None)

        # We already tested that subchain validate does the right thing,
        # here we test that this job care to call subchain validate.
        subchain.validate = fail
        job = api_merge.Job(job_id, subchain)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.VolumeIsNotInChain)

        # Check that validate is called *before* attempting - verify that
        # the chain data was *not* merged
        offset = base_index * 1024
        pattern = 0xf0 + base_index
        verify_pattern(base_vol.volumePath, qemuimg.FORMAT.RAW,
                       offset=offset, len=1024, pattern=pattern)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_rollback_volume_legallity_failed(self):
    """Even if restoring LEGAL on top fails while rolling back a failed
    rebase, top must be left ILLEGAL."""
    with self.make_env(sd_type='block', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def setLegality(self, legality):
            # Fail only when finalize tries to restore LEGAL during
            # rollback; setting ILLEGAL still works.
            if legality == sc.LEGAL_VOL:
                raise RuntimeError("Rollback volume legality failed")
            self.setMetaParam(sc.LEGALITY, legality)

        with MonkeyPatch().context() as mp:
            def failing_rebase(*args, **kw):
                # Simulate a qemu-img rebase failure.
                return operation.Command("/usr/bin/false")

            mp.setattr(qemuimg, 'rebase', failing_rebase)
            mp.setattr(volume.VolumeManifest, 'setLegality', setLegality)
            with pytest.raises(cmdutils.Error):
                merge.finalize(subchain)

        assert subchain.top_vol.getLegality() == sc.ILLEGAL_VOL
def test_chain_after_finalize(self, base_fmt):
    """After finalize, the remaining chain must still be readable:
    data written to base is read back through the child volume."""
    with self.make_env(format=base_fmt, chain_len=3) as env:
        base_vol = env.chain[0]
        # We write data to the base and will read it from the child volume
        # to verify that the chain is valid after qemu-rebase.
        offset = 0
        pattern = 0xf0
        length = 1024
        qemuio.write_pattern(
            base_vol.volumePath,
            sc.fmt2str(base_vol.getFormat()),
            offset=offset,
            len=length,
            pattern=pattern)

        top_vol = env.chain[1]
        child_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        merge.finalize(subchain)

        # The child must now see base's data through the rebased chain.
        qemuio.verify_pattern(
            child_vol.volumePath,
            sc.fmt2str(child_vol.getFormat()),
            offset=offset,
            len=length,
            pattern=pattern)
def test_merge_subchain_with_bitmaps(
        self, sd_type, chain_len, base_index, top_index):
    """Merging with merge_bitmaps=True must carry bitmaps from top into
    base: a bitmap present on both volumes is merged, and a bitmap
    present only on top is added to base."""
    job_id = make_uuid()
    bitmap1_name = 'bitmap1'
    bitmap2_name = 'bitmap2'
    with self.make_env(
            sd_type=sd_type,
            chain_len=chain_len,
            base_format=sc.COW_FORMAT,
            qcow2_compat='1.1') as env:
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]

        # Add new bitmap to base_vol and top_vol
        for vol in [base_vol, top_vol]:
            op = qemuimg.bitmap_add(
                vol.getVolumePath(),
                bitmap1_name,
            )
            op.run()

        # Add another bitmap to top_vol only
        # to test add + merge
        op = qemuimg.bitmap_add(
            top_vol.getVolumePath(),
            bitmap2_name,
        )
        op.run()

        # Writing data to the chain to modify the bitmaps
        write_qemu_chain(env.chain)

        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        job = api_merge.Job(job_id, subchain, merge_bitmaps=True)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # After the merge both bitmaps must exist on base, active
        # ("auto" flag), with the default 64 KiB granularity.
        info = qemuimg.info(base_vol.getVolumePath())
        # TODO: we should improve this test by adding a
        # a verification to the extents that are reported
        # by qemu-nbd.
        assert info['format-specific']['data']['bitmaps'] == [
            {
                "flags": ["auto"],
                "name": bitmap1_name,
                "granularity": 65536
            },
            {
                "flags": ["auto"],
                "name": bitmap2_name,
                "granularity": 65536
            },
        ]
def test_validate_top_is_not_in_chain(self):
    """validate() must raise VolumeIsNotInChain when top_id does not
    belong to the image chain."""
    with self.make_env() as env:
        base = env.chain[0]
        subchain = merge.SubchainInfo(
            {"sd_id": base.sdUUID,
             "img_id": base.imgUUID,
             "base_id": base.volUUID,
             "top_id": make_uuid(),  # unknown volume, not in the chain
             "base_generation": 0},
            0)
        with self.assertRaises(se.VolumeIsNotInChain):
            subchain.validate()
def test_validate_vol_is_not_base_parent(self):
    """validate() must raise WrongParentVolume when the chosen base is
    not top's direct parent (here it is the grandparent)."""
    with self.make_env(chain_len=3) as env:
        grandparent = env.chain[0]
        leaf = env.chain[2]
        subchain = merge.SubchainInfo(
            {"sd_id": leaf.sdUUID,
             "img_id": leaf.imgUUID,
             "base_id": grandparent.volUUID,
             "top_id": leaf.volUUID,
             "base_generation": 0},
            0)
        with self.assertRaises(se.WrongParentVolume):
            subchain.validate()
def test_validate_base_is_not_in_chain(self):
    """validate() must raise VolumeIsNotInChain when base_id is not a
    volume of the image chain."""
    with self.make_env() as env:
        leaf = env.chain[1]
        subchain = merge.SubchainInfo(
            {"sd_id": leaf.sdUUID,
             "img_id": leaf.imgUUID,
             "base_id": make_uuid(),  # unknown volume, not in the chain
             "top_id": leaf.volUUID,
             "base_generation": 0},
            0)
        with pytest.raises(se.VolumeIsNotInChain):
            subchain.validate()
def test_legal_chain(self):
    """A well-formed two-volume subchain must pass validate()."""
    with self.make_env() as env:
        base, top = env.chain[0], env.chain[1]
        subchain = merge.SubchainInfo(
            {"sd_id": base.sdUUID,
             "img_id": base.imgUUID,
             "base_id": base.volUUID,
             "top_id": top.volUUID,
             "base_generation": 0},
            0)
        # Must not raise.
        subchain.validate()
def test_validate_vol_is_not_shared(self, shared_vol):
    """validate() must raise SharedVolumeNonWritable when any volume of
    the subchain is marked shared."""
    with self.make_env(chain_len=3, shared=True) as env:
        base, top = env.chain[0], env.chain[1]
        # Mark the parametrized volume as shared.
        env.chain[shared_vol].setShared()
        subchain = merge.SubchainInfo(
            {"sd_id": top.sdUUID,
             "img_id": top.imgUUID,
             "base_id": base.volUUID,
             "top_id": top.volUUID,
             "base_generation": 0},
            0)
        with self.assertRaises(se.SharedVolumeNonWritable):
            subchain.validate()
def make_env(env_type, base, top):
    """Build a fake storage environment with a base volume and a COW top
    volume, attaching a SubchainInfo for them as env.subchain.

    This is a generator that yields env; presumably wrapped by a
    contextmanager decorator outside this view - confirm at the
    definition site.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    # Only a raw base on block storage is preallocated; everything else
    # is sparse.
    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, "config", CONFIG)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(blockVolume, 'sdCache', env.sdcache)
            # Image.getChain returns exactly the two-volume subchain.
            mp.setattr(
                image.Image, 'getChain',
                lambda self, sdUUID, imgUUID: [
                    env.subchain.base_vol, env.subchain.top_vol])

            env.make_volume(
                base.virtual * GiB, img_id, base_id,
                vol_format=sc.name2type(base.format),
                prealloc=prealloc,
                vol_type=sc.INTERNAL_VOL)
            env.make_volume(
                top.virtual * GiB, img_id, top_id,
                parent_vol_id=base_id,
                vol_format=sc.COW_FORMAT,
                vol_type=sc.LEAF_VOL if top.leaf else sc.INTERNAL_VOL)

            env.subchain = merge.SubchainInfo(
                dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                     base_id=base_id, top_id=top_id), 0)

            if env_type == 'block':
                # Simulate allocation by adjusting the LV sizes
                env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                                 base.physical * GiB // MiB)
                env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                                 top.physical * GiB // MiB)

            yield env
def make_env(env_type, base, top):
    """Build a fake storage environment with a base volume and a COW top
    volume, attaching a SubchainInfo for them as env.subchain.

    This is a generator that yields env inside the patched scope
    (guarded context, sdCache, resource manager, Image.getChain and
    extendSize fakes); presumably wrapped by a contextmanager decorator
    outside this view - confirm at the definition site.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    # Only a raw base on block storage is preallocated; everything else
    # is sparse.
    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id,
                        vol_format=sc.COW_FORMAT)

        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes.
            # Fixed: use floor division - '/' is true division on
            # Python 3 and would pass a float size (in MB) to extendLV;
            # the sibling make_env above already uses '//'.
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (blockVolume, 'sdCache', env.sdcache),
            # Image.getChain returns exactly the two-volume subchain.
            (image.Image, 'getChain',
             lambda self, sdUUID, imgUUID: [
                 env.subchain.base_vol, env.subchain.top_vol]),
            (blockVolume.BlockVolume, 'extendSize',
             partial(fake_blockVolume_extendSize, env)),
            (fileVolume.FileVolume, 'extendSize',
             partial(fake_fileVolume_extendSize, env)),
        ]):
            yield env
def test_finalize_illegal_volume(self, volume):
    """finalize() must raise prepareIllegalVolumeError when either the
    base or the top volume is marked ILLEGAL.

    NOTE(review): the 'volume' parameter (parametrized 'base'/'top')
    shadows the 'volume' module used elsewhere in this file; it is not
    referenced here, so this is only a readability hazard.
    """
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        if volume == 'base':
            base_vol.setLegality(sc.ILLEGAL_VOL)
        else:
            top_vol.setLegality(sc.ILLEGAL_VOL)

        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        with pytest.raises(se.prepareIllegalVolumeError):
            merge.finalize(subchain)
def test_merge_subchain(self, sd_type, chain_len, base_index, top_index):
    """Run a full merge job and verify top's data was merged into base,
    the expected locks were taken, and base stays LEGAL with its
    generation bumped from 0 to 1."""
    job_id = make_uuid()
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)
        job = api_merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # Verify that the chain data was merged
        for i in range(base_index, top_index + 1):
            offset = i * 1024
            pattern = 0xf0 + i
            # We expect to read all data from top
            verify_pattern(
                top_vol.volumePath,
                qemuimg.FORMAT.QCOW2,
                offset=offset,
                len=1024,
                pattern=pattern)
            # And base, since top was merged into base
            verify_pattern(
                base_vol.volumePath,
                sc.fmt2str(base_vol.getFormat()),
                offset=offset,
                len=1024,
                pattern=pattern)

        self.assertEqual(sorted(self.expected_locks(base_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(base_vol.getLegality(), sc.LEGAL_VOL)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 1)
def test_qemuimg_rebase_failed(self):
    """If qemu-img rebase fails, finalize must roll back: top stays
    LEGAL and still has base as its parent."""
    with self.make_env(sd_type='file', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        with MonkeyPatch().context() as mp:
            # Make every qemu-img invocation fail.
            mp.setattr(qemuimg._qemuimg, '_cmd', '/usr/bin/false')
            with pytest.raises(cmdutils.Error):
                merge.finalize(subchain)

        assert subchain.top_vol.getLegality() == sc.LEGAL_VOL
        assert subchain.top_vol.getParent() == base_vol.volUUID
def test_qemuimg_rebase_failed(self):
    """If qemu-img rebase fails, finalize must roll back: top stays
    LEGAL and still has base as its parent."""
    with self.make_env(sd_type='file', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        # Make every qemu-img invocation fail.
        with MonkeyPatchScope([(qemuimg._qemuimg, '_cmd',
                                '/usr/bin/false')]):
            with self.assertRaises(cmdutils.Error):
                merge.finalize(subchain)
            self.assertEqual(subchain.top_vol.getLegality(), sc.LEGAL_VOL)
            self.assertEqual(subchain.top_vol.getParent(),
                             base_vol.volUUID)
def test_reduce_chunked(self):
    """On block (chunked) storage, finalize must reduce the base volume
    to its optimal size."""
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        merge.finalize(subchain)

        # The fake domain records calls on the produced volume;
        # exactly one reduce() with the optimal size is expected.
        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        assert fake_base_vol.__calls__ == [
            ('reduce', (base_vol.optimal_size(), ), {}),
        ]
def test_reduce_not_chunked(self):
    """On file (non-chunked) storage, finalize must not reduce the base
    volume."""
    with self.make_env(sd_type='file', format='cow', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        merge.finalize(subchain)

        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        calls = getattr(fake_base_vol, "__calls__", {})
        # Verify that 'calls' is empty which means that 'reduce' wasn't
        # called
        assert len(calls) == 0
def test_merge_illegal_volume(self, volume):
    """A merge job must fail with prepareIllegalVolumeError when base
    or top is marked ILLEGAL.

    NOTE(review): the 'volume' parameter (parametrized 'base'/'top')
    shadows the 'volume' module used elsewhere in this file; it is not
    referenced here, so this is only a readability hazard.
    """
    job_id = make_uuid()
    with self.make_env(sd_type='block', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        if volume == 'base':
            base_vol.setLegality(sc.ILLEGAL_VOL)
        else:
            top_vol.setLegality(sc.ILLEGAL_VOL)
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)
        job = api_merge.Job(job_id, subchain)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.prepareIllegalVolumeError)
def test_reduce_failure(self):
    """If reducing the base volume fails, finalize propagates the
    error, but syncVolumeChain must still have been called with the
    expected arguments."""
    with self.make_env(sd_type='block', format='cow', chain_len=4) as env:
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        # Make the fake base volume fail its reduce() call.
        fake_sd = env.sdcache.domains[env.sd_manifest.sdUUID]
        fake_base_vol = fake_sd.produceVolume(subchain.img_id,
                                              subchain.base_id)
        fake_base_vol.errors["reduce"] = se.LogicalVolumeExtendError(
            "vgname", "lvname", base_vol.optimal_size())

        with pytest.raises(se.LogicalVolumeExtendError):
            merge.finalize(subchain)

        # verify syncVolumeChain arguments
        self.check_sync_volume_chain(subchain, env.chain[-1].volUUID)
def test_rollback_volume_legallity_failed(self):
    """Even if restoring LEGAL on top fails while rolling back a failed
    rebase, top must be left ILLEGAL."""
    with self.make_env(sd_type='block', chain_len=4) as env:
        base_vol = env.chain[1]
        top_vol = env.chain[2]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def setLegality(self, legality):
            # Fail only when finalize tries to restore LEGAL during
            # rollback; setting ILLEGAL still works.
            if legality == sc.LEGAL_VOL:
                raise RuntimeError("Rollback volume legality failed")
            self.setMetaParam(sc.LEGALITY, legality)

        with MonkeyPatchScope([
            (qemuimg._qemuimg, '_cmd', '/usr/bin/false'),
            (volume.VolumeManifest, 'setLegality', setLegality),
        ]):
            with self.assertRaises(cmdutils.Error):
                merge.finalize(subchain)
            self.assertEqual(subchain.top_vol.getLegality(),
                             sc.ILLEGAL_VOL)