def test_intra_domain_copy(env_type, src_fmt, dst_fmt):
    """Copy one volume within a single domain and verify data and locks."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Before the copy the destination chain must fail verification.
        with pytest.raises(qemuio.VerificationError):
            verify_qemu_chain(env.dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()

        # The job must have taken exactly the expected guarded locks.
        assert sorted(expected_locks(src_vol, dst_vol)) == sorted(
            guarded.context.locks)
        assert jobs.STATUS.DONE == job.status
        assert 100.0 == job.progress
        assert 'error' not in job.info()
        verify_qemu_chain(env.dst_chain)
        assert sc.fmt2str(dst_fmt) == qemuimg.info(
            dst_vol.volumePath)['format']
def test_qcow2_compat(self, env_type, qcow2_compat, sd_version):
    """Copied qcow2 volume must adopt the domain's qcow2 compat level."""
    src_fmt = sc.name2type("cow")
    dst_fmt = sc.name2type("cow")
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt, sd_version=sd_version,
                       src_qcow2_compat=qcow2_compat) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)

        job = copy_data.Job(job_id, 0, source, dest)
        job.run()

        actual_compat = qemuimg.info(dst_vol.volumePath)['compat']
        self.assertEqual(actual_compat, env.sd_manifest.qcow2_compat())
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """Copy one volume within a single domain and verify data and locks."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # The destination chain has no data yet, so verification must fail.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, env.dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)

        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(env.dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """Copy one volume within a single domain and verify data and locks."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.get_vols(env_type, src_fmt, dst_fmt) as (src_chain,
                                                       dst_chain):
        src_vol = src_chain[0]
        dst_vol = dst_chain[0]
        write_qemu_chain(src_chain)
        # Destination holds no data yet, so verification must fail.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)

        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """Copy one volume within a single domain and verify the result."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = str(uuid.uuid4())
    with self.get_vols(env_type, src_fmt, dst_fmt) as (src_chain,
                                                       dst_chain):
        src_vol = src_chain[0]
        dst_vol = dst_chain[0]
        write_qemu_chain(src_chain)
        # Destination holds no data yet, so verification must fail.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, None, source, dest)
        job.run()
        wait_for_job(job)

        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """Copy each chain volume in copy_seq order, then verify the chain."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.get_vols(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as (src_chain, dst_chain):
        write_qemu_chain(src_chain)
        for index in copy_seq:
            job_id = make_uuid()
            src_vol = src_chain[index]
            dst_vol = dst_chain[index]
            source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                          img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
            dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                        img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
            job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
            job.run()
            wait_for_job(job)
            # Each copy must take exactly the expected guarded locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(dst_chain)
def make_env(env_type, base, top):
    """Yield a fake env holding a base/top volume pair for merge tests.

    ``env.subchain`` is set to the created subchain. On block storage the
    LVs are extended to simulate the allocated (physical) size.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == "block" and base.format == "raw":
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id, vol_format=sc.COW_FORMAT)
        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == "block":
            # Simulate allocation by adjusting the LV sizes.
            # Use floor division: extendLV expects an integral size in
            # MiB, and "/" would produce a float on Python 3 (the sibling
            # make_env helpers already use "//").
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        rm = FakeResourceManager()
        with MonkeyPatchScope(
            [
                (guarded, "context", fake_guarded_context()),
                (merge, "sdCache", env.sdcache),
                (blockVolume, "rm", rm),
                (blockVolume, "sdCache", env.sdcache),
                (image.Image, "getChain",
                 lambda self, sdUUID, imgUUID: [env.subchain.base_vol,
                                                env.subchain.top_vol]),
                (blockVolume.BlockVolume, "extendSize",
                 partial(fake_blockVolume_extendSize, env)),
                (fileVolume.FileVolume, "extendSize",
                 partial(fake_fileVolume_extendSize, env)),
            ]
        ):
            yield env
def test_clear_invalid_bitmaps(fake_scheduler, env_type):
    """Clearing bitmaps removes disabled bitmaps and bumps generation."""
    with make_env(env_type, sc.name2type('cow')) as env:
        top_vol = env.chain[0]

        # Add new invalid (disabled) bitmaps to the top volume.
        for bitmap in ['bitmap_1', 'bitmap_2']:
            op = qemuimg.bitmap_add(
                top_vol.getVolumePath(), bitmap, enable=False)
            op.run()

        # Clear the created bitmaps.
        generation = top_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=top_vol.sdUUID,
            img_id=top_vol.imgUUID,
            vol_id=top_vol.volUUID,
            generation=generation)
        job = clear_bitmaps.Job(make_uuid(), 0, vol)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(top_vol.getVolumePath())
        assert "bitmaps" not in vol_info["format-specific"]["data"]
        assert top_vol.getMetaParam(sc.GENERATION) == generation + 1
def test_add_remove_bitmap(fake_scheduler, env_type):
    """Removing one bitmap keeps the others and bumps the generation."""
    bitmap1 = "bitmap1"
    bitmap2 = "bitmap2"
    with make_env(env_type, sc.name2type('cow')) as env:
        top_vol = env.chain[0]

        # Add both bitmaps to the volume.
        for bitmap in [bitmap1, bitmap2]:
            op = qemuimg.bitmap_add(top_vol.getVolumePath(), bitmap)
            op.run()

        # Remove only the first bitmap.
        generation = top_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=top_vol.sdUUID,
            img_id=top_vol.imgUUID,
            vol_id=top_vol.volUUID,
            generation=generation)
        job = remove_bitmap.Job(make_uuid(), 0, vol, bitmap1)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(top_vol.getVolumePath())
        bitmaps = [
            b["name"] for b in
            vol_info["format-specific"]["data"].get("bitmaps", [])
        ]
        assert bitmap1 not in bitmaps and bitmap2 in bitmaps
        assert top_vol.getMetaParam(sc.GENERATION) == generation + 1
def test_remove_invalid_bitmap(fake_scheduler, env_type):
    """A bitmap left "in-use" after a qemu crash can still be removed."""
    bitmap = "bitmap"
    with make_env(env_type, sc.name2type('cow')) as env:
        base_vol = env.chain[0]

        # Add a bitmap to the base volume.
        op = qemuimg.bitmap_add(
            base_vol.getVolumePath(),
            bitmap,
        )
        op.run()

        # Simulate qemu crash, leaving bitmaps with the "in-use"
        # flag by opening the image for writing and killing the process.
        qemuio.abort(base_vol.getVolumePath())

        generation = base_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=base_vol.sdUUID,
            img_id=base_vol.imgUUID,
            vol_id=base_vol.volUUID,
            generation=generation)
        job = remove_bitmap.Job(make_uuid(), 0, vol, bitmap)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(base_vol.getVolumePath())
        bitmaps = vol_info["format-specific"]["data"].get("bitmaps", [])
        assert not bitmaps
        assert base_vol.getMetaParam(sc.GENERATION) == generation + 1
def test_remove_inactive_bitmap(fake_scheduler, env_type):
    """An inactive (disabled) bitmap can be removed from the volume."""
    bitmap = "bitmap"
    with make_env(env_type, sc.name2type('cow')) as env:
        base_vol = env.chain[0]

        # Add an inactive bitmap to the base volume.
        op = qemuimg.bitmap_add(
            base_vol.getVolumePath(), bitmap, enable=False)
        op.run()

        generation = base_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=base_vol.sdUUID,
            img_id=base_vol.imgUUID,
            vol_id=base_vol.volUUID,
            generation=generation)
        job = remove_bitmap.Job(make_uuid(), 0, vol, bitmap)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(base_vol.getVolumePath())
        bitmaps = vol_info["format-specific"]["data"].get("bitmaps", [])
        assert not bitmaps
        assert base_vol.getMetaParam(sc.GENERATION) == generation + 1
def make_env(self, sd_type='block', format='raw', prealloc=sc.SPARSE_VOL,
             chain_len=2):
    """Yield a fake env with a qemu chain and patched merge collaborators."""
    size = 2 * GiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(image, 'Image', FakeImage)

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len,
                                        prealloc=prealloc)
            # Map every chain volume to a fake volume in the domain cache.
            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def test_get_children(self):
    """getChildren works on a domain mounted via an IPv6 remote path."""
    remote_path = "[2001:db8:85a3::8a2e:370:7334]:1234:/path"
    size = 5 * MEGAB

    # Simulate a domain with an ipv6 address
    with fake_env(storage_type='file', remote_path=remote_path) as env:
        env.chain = make_qemu_chain(env, size, sc.name2type('raw'), 2)
        base_vol = env.chain[0]
        assert (env.chain[1].volUUID,) == base_vol.getChildren()
def test_get_children(self):
    """getChildren works on a domain mounted via an IPv6 remote path."""
    remote_path = "[2001:db8:85a3::8a2e:370:7334]:1234:/path"
    size = 5 * MEGAB

    # Simulate a domain with an ipv6 address
    with fake_env(storage_type='file', remote_path=remote_path) as env:
        env.chain = make_qemu_chain(env, size, sc.name2type('raw'), 2)
        base_vol = env.chain[0]
        children = base_vol.getChildren()
        assert (env.chain[1].volUUID,) == children
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """Copy each chain volume in copy_seq order, then verify the chain."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.make_env(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as env:
        write_qemu_chain(env.src_chain)
        for index in copy_seq:
            job_id = make_uuid()
            src_vol = env.src_chain[index]
            dst_vol = env.dst_chain[index]
            source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                          img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
            dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                        img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
            job = copy_data.Job(job_id, 0, source, dest)
            job.run()
            # Each copy must take exactly the expected guarded locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(env.dst_chain)
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """Copy each chain volume in copy_seq order, then verify the chain."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.get_vols(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as (src_chain, dst_chain):
        write_qemu_chain(src_chain)
        for index in copy_seq:
            job_id = str(uuid.uuid4())
            src_vol = src_chain[index]
            dst_vol = dst_chain[index]
            source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                          img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
            dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                        img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
            job = storage.sdm.api.copy_data.Job(job_id, None, source, dest)
            job.run()
            wait_for_job(job)
        verify_qemu_chain(dst_chain)
def make_env(self, storage_type, fmt=sc.name2type('cow'), chain_length=1,
             size=DEFAULT_SIZE, qcow2_compat='0.10'):
    """Yield a fake env with a qemu chain and patched copy_data deps."""
    with fake_env(storage_type, sd_version=4) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (copy_data, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            env.chain = make_qemu_chain(env, size, fmt, chain_length,
                                        qcow2_compat=qcow2_compat)
            yield env
def test_clear_bitmaps_from_vol_chain(fake_scheduler, env_type):
    """Bitmaps can be cleared from both leaf and internal chain volumes."""
    bitmap1 = "bitmap1"
    bitmap2 = "bitmap2"
    with make_env(env_type, sc.name2type('cow'), chain_length=3) as env:
        # Add both bitmaps to every volume in the chain.
        for vol in env.chain:
            for bitmap in (bitmap1, bitmap2):
                op = qemuimg.bitmap_add(vol.getVolumePath(), bitmap)
                op.run()

        # Clear all the bitmaps from the leaf volume.
        leaf_vol = env.chain[2]
        generation = leaf_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=leaf_vol.sdUUID,
            img_id=leaf_vol.imgUUID,
            vol_id=leaf_vol.volUUID,
            generation=generation)
        job = clear_bitmaps.Job(make_uuid(), 0, vol)
        job.run()

        assert jobs.STATUS.DONE == job.status
        # Validate that all the bitmaps were removed from the leaf volume.
        vol_info = qemuimg.info(leaf_vol.getVolumePath())
        bitmaps = vol_info["format-specific"]["data"].get("bitmaps", [])
        assert not bitmaps
        assert leaf_vol.getMetaParam(sc.GENERATION) == generation + 1

        # Clear all the bitmaps from an internal volume.
        internal_vol = env.chain[1]
        generation = internal_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=internal_vol.sdUUID,
            img_id=internal_vol.imgUUID,
            vol_id=internal_vol.volUUID,
            generation=generation)
        job = clear_bitmaps.Job(make_uuid(), 0, vol)
        job.run()

        assert jobs.STATUS.DONE == job.status
        # Validate that all the bitmaps were removed from the internal
        # volume.
        vol_info = qemuimg.info(internal_vol.getVolumePath())
        bitmaps = vol_info["format-specific"]["data"].get("bitmaps", [])
        assert not bitmaps
        assert internal_vol.getMetaParam(sc.GENERATION) == generation + 1
def test_qcow2_compat(user_mount, fake_scheduler, qcow2_compat, sd_version):
    """Copied qcow2 volume adopts the domain compat and matches the source."""
    src_fmt = sc.name2type("cow")
    dst_fmt = sc.name2type("cow")
    job_id = make_uuid()
    data_center = os.path.join(user_mount.path, "data-center")

    with make_env(
            "file", src_fmt, dst_fmt,
            sd_version=sd_version,
            src_qcow2_compat=qcow2_compat,
            data_center=data_center) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()

        dst_info = qemuimg.info(dst_vol.volumePath)
        actual_compat = dst_info['format-specific']['data']['compat']
        assert actual_compat == env.sd_manifest.qcow2_compat()

        # After the copy, images must be exactly the same.
        op = qemuimg.compare(
            src_vol.getVolumePath(),
            dst_vol.getVolumePath(),
            img1_format='qcow2',
            img2_format='qcow2',
            strict=True,
        )
        op.run()
def make_env(env_type, base, top):
    """Yield a fake env holding a base/top volume pair for merge tests.

    ``env.subchain`` is set to the created subchain. On block storage the
    LVs are extended to simulate the allocated (physical) size.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, "config", CONFIG)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(blockVolume, 'sdCache', env.sdcache)
            mp.setattr(
                image.Image, 'getChain',
                lambda self, sdUUID, imgUUID: [env.subchain.base_vol,
                                               env.subchain.top_vol])

            env.make_volume(
                base.virtual * GiB, img_id, base_id,
                vol_format=sc.name2type(base.format),
                prealloc=prealloc,
                vol_type=sc.INTERNAL_VOL)
            env.make_volume(
                top.virtual * GiB, img_id, top_id,
                parent_vol_id=base_id,
                vol_format=sc.COW_FORMAT,
                vol_type=sc.LEAF_VOL if top.leaf else sc.INTERNAL_VOL)
            env.subchain = merge.SubchainInfo(
                dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                     base_id=base_id, top_id=top_id), 0)

            if env_type == 'block':
                # Simulate allocation by adjusting the LV sizes
                env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                                 base.physical * GiB // MiB)
                env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                                 top.physical * GiB // MiB)

            yield env
def _run(self):
    """Create the volume artifacts under domain and image locks, commit."""
    fmt = sc.name2type(self.vol_info.vol_format)
    with self.sd_manifest.domain_lock(self.host_id):
        image_res_ns = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                       self.sd_manifest.sdUUID)
        # Exclusive image lock: nothing else may touch this image while
        # the artifacts are being created.
        with rm.acquireResource(image_res_ns, self.vol_info.img_id,
                                rm.EXCLUSIVE):
            artifacts = self.sd_manifest.get_volume_artifacts(
                self.vol_info.img_id, self.vol_info.vol_id)
            artifacts.create(
                self.vol_info.virtual_size, fmt,
                self.vol_info.disk_type, self.vol_info.description,
                self.vol_info.parent, self.vol_info.initial_size)
            artifacts.commit()
def make_env(env_type, base, top):
    """Yield a fake env holding a base/top volume pair for merge tests.

    ``env.subchain`` is set to the created subchain. On block storage the
    LVs are extended to simulate the allocated (physical) size.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id, vol_format=sc.COW_FORMAT)
        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes.
            # Use floor division: extendLV expects an integral size in
            # MiB, and "/" would produce a float on Python 3 (the sibling
            # make_env helpers already use "//").
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (blockVolume, 'sdCache', env.sdcache),
            (image.Image, 'getChain',
             lambda self, sdUUID, imgUUID: [env.subchain.base_vol,
                                            env.subchain.top_vol]),
            (blockVolume.BlockVolume, 'extendSize',
             partial(fake_blockVolume_extendSize, env)),
            (fileVolume.FileVolume, 'extendSize',
             partial(fake_fileVolume_extendSize, env)),
        ]):
            yield env
def _run(self):
    """Create the volume artifacts under domain and image locks, commit."""
    fmt = sc.name2type(self.vol_info.vol_format)
    with self.sd_manifest.domain_lock(self.host_id):
        image_res_ns = sd.getNamespace(self.sd_manifest.sdUUID,
                                       IMAGE_NAMESPACE)
        # Exclusive image lock: nothing else may touch this image while
        # the artifacts are being created.
        with rmanager.acquireResource(image_res_ns, self.vol_info.img_id,
                                      rm.LockType.exclusive):
            artifacts = self.sd_manifest.get_volume_artifacts(
                self.vol_info.img_id, self.vol_info.vol_id)
            artifacts.create(self.vol_info.virtual_size, fmt,
                             self.vol_info.disk_type,
                             self.vol_info.description,
                             self.vol_info.parent,
                             self.vol_info.initial_size)
            artifacts.commit()
def test_sd_version_no_support_compat(self, env_type):
    """Amending compat on a v3 domain must fail and leave the volume intact."""
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=3) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')

        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()

        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(type(job.error), se.GeneralException)
        # The volume must remain legal, with its generation untouched.
        self.assertEqual(sc.LEGAL_VOL, env_vol.getLegality())
        self.assertEqual(generation, env_vol.getMetaParam(sc.GENERATION))
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    """Yield a fake env with a qemu chain and patched merge collaborators."""
    size = MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    """Yield a fake env with a qemu chain and patched merge collaborators."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def test_amend(fake_scheduler, env_type):
    """Amending a 0.10 qcow2 volume to compat 1.1 succeeds."""
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with make_env(env_type, fmt, sd_version=4, qcow2_compat='0.10') as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        assert env_vol.getQemuImageInfo()['compat'] == '0.10'

        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()

        assert jobs.STATUS.DONE == job.status
        assert env_vol.getQemuImageInfo()['compat'] == '1.1'
        assert env_vol.getMetaParam(sc.GENERATION) == generation + 1
def test_sd_version_no_support_compat(fake_scheduler, env_type):
    """Amending compat on a v3 domain must fail and leave the volume intact."""
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with make_env(env_type, fmt, sd_version=3) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')

        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()

        assert job.status == jobs.STATUS.FAILED
        assert type(job.error) == se.GeneralException
        # The volume must remain legal, with its generation untouched.
        assert env_vol.getLegality() == sc.LEGAL_VOL
        assert env_vol.getMetaParam(sc.GENERATION) == generation
def test_amend(self, env_type):
    """Amending a 0.10 qcow2 volume to compat 1.1 succeeds."""
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=4,
                       qcow2_compat='0.10') as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        self.assertEqual('0.10', env_vol.getQemuImageInfo()['compat'])

        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()

        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual('1.1', env_vol.getQemuImageInfo()['compat'])
        self.assertEqual(generation + 1,
                         env_vol.getMetaParam(sc.GENERATION))
def make_env(env_type, base, top):
    """Yield a fake env holding a base/top volume pair for merge tests.

    ``env.subchain`` is set to the created subchain. On block storage the
    LVs are extended to simulate the allocated (physical) size.
    """
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id, vol_format=sc.COW_FORMAT)
        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(blockVolume, 'sdCache', env.sdcache)
            mp.setattr(
                image.Image, 'getChain',
                lambda self, sdUUID, imgUUID: [env.subchain.base_vol,
                                               env.subchain.top_vol])
            mp.setattr(
                blockVolume.BlockVolume, 'extendSize',
                partial(fake_blockVolume_extendSize, env))
            mp.setattr(
                fileVolume.FileVolume, 'extendSize',
                partial(fake_fileVolume_extendSize, env))
            yield env
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    """Yield a fake env with a qemu chain and patched merge collaborators."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def test_clear_missing_bitmaps(fake_scheduler, env_type):
    """Clearing bitmaps on a volume without any succeeds and bumps gen."""
    with make_env(env_type, sc.name2type('cow')) as env:
        top_vol = env.chain[0]
        generation = top_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=top_vol.sdUUID,
            img_id=top_vol.imgUUID,
            vol_id=top_vol.volUUID,
            generation=generation)
        job = clear_bitmaps.Job(make_uuid(), 0, vol)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(top_vol.getVolumePath())
        bitmaps = vol_info["format-specific"]["data"].get("bitmaps", [])
        assert not bitmaps
        assert top_vol.getMetaParam(sc.GENERATION) == generation + 1
def create_volume(env, format, allocation, parent=None):
    """Create a new volume, optionally chained under parent, and return it."""
    vol_id = str(uuid.uuid4())
    if parent:
        # Child volume: join the parent's image and chain onto it.
        img_id = parent.imgUUID
        parent_vol_id = parent.volUUID
    else:
        # Base volume: new image, no parent.
        img_id = str(uuid.uuid4())
        parent_vol_id = sc.BLANK_UUID

    env.make_volume(
        env.virtual_size, img_id, vol_id,
        parent_vol_id=parent_vol_id,
        vol_format=sc.str2fmt(format),
        prealloc=sc.name2type(allocation),
        qcow2_compat="1.1")
    return env.sd_manifest.produceVolume(img_id, vol_id)
def test_vol_type_not_qcow(fake_scheduler, env_type):
    """Adding a bitmap to a raw volume fails, leaving the volume intact."""
    job_id = make_uuid()
    bitmap = "bitmap"
    with make_env(env_type, sc.name2type('raw')) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)

        job = add_bitmap.Job(job_id, 0, vol, bitmap)
        job.run()

        assert job.status == jobs.STATUS.FAILED
        assert type(job.error) == se.GeneralException
        # The volume must remain legal, with its generation untouched.
        assert env_vol.getLegality() == sc.LEGAL_VOL
        assert env_vol.getMetaParam(sc.GENERATION) == generation
def test_qemu_bitmap_add_failure(fake_scheduler, monkeypatch, env_type):
    """A failing qemuimg.bitmap_add fails the job without harming the volume."""
    monkeypatch.setattr(qemuimg, "bitmap_add", failure)
    job_id = make_uuid()
    with make_env(env_type, sc.name2type('cow')) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)

        job = add_bitmap.Job(job_id, 0, vol, "bitmap")
        job.run()

        assert job.status == jobs.STATUS.FAILED
        assert type(job.error) == exception.AddBitmapError
        # The volume must remain legal, with its generation untouched.
        assert env_vol.getLegality() == sc.LEGAL_VOL
        assert env_vol.getMetaParam(sc.GENERATION) == generation
def test_vol_type_not_qcow(fake_scheduler, env_type):
    """Clearing bitmaps on a raw volume fails, leaving the volume intact."""
    with make_env(env_type, sc.name2type('raw')) as env:
        top_vol = env.chain[0]
        generation = top_vol.getMetaParam(sc.GENERATION)
        vol = dict(
            endpoint_type='div',
            sd_id=top_vol.sdUUID,
            img_id=top_vol.imgUUID,
            vol_id=top_vol.volUUID,
            generation=generation)

        # Clear bitmap failed for RAW volume
        job = clear_bitmaps.Job(make_uuid(), 0, vol)
        job.run()

        assert job.status == jobs.STATUS.FAILED
        assert type(job.error) == se.UnsupportedOperation
        # The volume must remain legal, with its generation untouched.
        assert top_vol.getLegality() == sc.LEGAL_VOL
        assert top_vol.getMetaParam(sc.GENERATION) == generation
def make_env(self, sd_type="file", format="raw", chain_len=2, shared=False):
    """Yield a fake env with a qemu chain; isShared is patched to `shared`."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope(
            [
                (guarded, "context", fake_guarded_context()),
                (merge, "sdCache", env.sdcache),
                (blockVolume, "rm", rm),
                (volume.VolumeManifest, "isShared", lambda self: shared),
            ]
        ):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def test_add_bitmap(fake_scheduler, env_type):
    """Adding a bitmap to a qcow2 volume succeeds and bumps generation."""
    job_id = make_uuid()
    bitmap = "bitmap"
    with make_env(env_type, sc.name2type('cow')) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)

        job = add_bitmap.Job(job_id, 0, vol, bitmap)
        job.run()

        assert jobs.STATUS.DONE == job.status
        vol_info = qemuimg.info(env_vol.getVolumePath())
        qcow2_data = vol_info["format-specific"]["data"]
        assert len(qcow2_data["bitmaps"]) == 1
        assert qcow2_data["bitmaps"][0]["name"] == bitmap
        assert env_vol.getMetaParam(sc.GENERATION) == generation + 1
def make_env(self, sd_type="block", format="raw", chain_len=2):
    """Yield a fake env with a qemu chain and a fake Image implementation."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope(
            [
                (guarded, "context", fake_guarded_context()),
                (merge, "sdCache", env.sdcache),
                (blockVolume, "rm", rm),
                (image, "Image", FakeImage),
            ]
        ):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def make_env(self, sd_type='block', format='raw', chain_len=2):
    """Yield a fake env with a qemu chain, fake Image and fake volumes."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(image, 'Image', FakeImage)

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)
            # Map every chain volume to a fake volume in the domain cache.
            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def make_env(self, sd_type='block', format='raw', chain_len=2):
    """Yield a fake env with a qemu chain, fake Image and fake volumes."""
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (image, 'Image', FakeImage),
        ]):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)
            # Map every chain volume to a fake volume in the domain cache.
            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def getType(self):
    """Return this volume's type, decoded from the TYPE metadata param."""
    raw_type = self.getMetaParam(sc.TYPE)
    return sc.name2type(raw_type)
def getFormat(self):
    """Return this volume's format, decoded from the FORMAT metadata param."""
    raw_format = self.getMetaParam(sc.FORMAT)
    return sc.name2type(raw_format)