def make_env(self, sd_type='block', format='raw', prealloc=sc.SPARSE_VOL,
             chain_len=2):
    size = 2 * GiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(image, 'Image', FakeImage)

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len,
                                        prealloc=prealloc)

            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def make_env(self, storage_type, src_fmt, dst_fmt, chain_length=1,
             size=DEFAULT_SIZE, sd_version=3, src_qcow2_compat='0.10',
             prealloc=sc.SPARSE_VOL):
    with fake_env(storage_type, sd_version=sd_version) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (copy_data, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            # Create existing volume - may use compat 0.10 or 1.1.
            src_vols = make_qemu_chain(env, size, src_fmt, chain_length,
                                       qcow2_compat=src_qcow2_compat,
                                       prealloc=prealloc)

            # New volumes are always created using the domain
            # preferred format.
            sd_compat = env.sd_manifest.qcow2_compat()
            dst_vols = make_qemu_chain(env, size, dst_fmt, chain_length,
                                       qcow2_compat=sd_compat,
                                       prealloc=prealloc)

            env.src_chain = src_vols
            env.dst_chain = dst_vols

            yield env
def make_env(self, sd_type, chain_len=2, base_format=sc.RAW_FORMAT,
             qcow2_compat='0.10'):
    size = MiB
    base_fmt = base_format
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (image, 'sdCache', env.sdcache),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (image, 'Image', FakeImage),
        ]):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len,
                                        qcow2_compat=qcow2_compat)

            # Assigned as a method on the (fake) Image class, so it must
            # accept self like the other make_env variants.
            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def test_copy_data_collapse(tmpdir, tmp_repo, fake_access, fake_rescan,
                            tmp_db, fake_task, fake_scheduler, monkeypatch,
                            dest_format, sd_version):
    dom = tmp_repo.create_localfs_domain(name="domain", version=sd_version)

    chain_size = 3
    volumes = create_chain(dom, chain_size)
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())

    length = MiB

    # Write some data to each layer
    for i, vol in enumerate(volumes):
        qemuio.write_pattern(vol.getVolumePath(), sc.fmt2str(vol.getFormat()),
                             offset=(i * length))

    # The last volume in the chain is the leaf
    source_leaf_vol = volumes[-1]
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id,
                             volFormat=dest_format)

    source = dict(endpoint_type='div', sd_id=source_leaf_vol.sdUUID,
                  img_id=source_leaf_vol.imgUUID,
                  vol_id=source_leaf_vol.volUUID)
    dest = dict(endpoint_type='div', sd_id=source_leaf_vol.sdUUID,
                img_id=dest_img_id, vol_id=dest_vol_id)

    # Run copy_data from the source chain to dest_vol, essentially
    # executing qemu-img convert
    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()

    # Source chain and destination image must have the same data, but
    # allocation may differ.
    op = qemuimg.compare(source_leaf_vol.getVolumePath(),
                         dest_vol.getVolumePath(),
                         img1_format='qcow2',
                         img2_format=sc.fmt2str(dest_format),
                         strict=False)
    op.run()

    # Destination actual size should be smaller than source chain actual
    # size, since we have only one qcow2 header (qcow2), or no header (raw).
    src_actual_size = sum(qemuimg.info(vol.getVolumePath())["actualsize"]
                          for vol in volumes)
    dst_actual_size = qemuimg.info(dest_vol.getVolumePath())["actualsize"]
    assert dst_actual_size < src_actual_size
def make_env(self, storage_type, fmt=sc.name2type('cow'), chain_length=1,
             size=DEFAULT_SIZE, qcow2_compat='0.10'):
    with fake_env(storage_type, sd_version=4) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (copy_data, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            env.chain = make_qemu_chain(env, size, fmt, chain_length,
                                        qcow2_compat=qcow2_compat)
            yield env
def make_env(env_type, base, top):
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, "config", CONFIG)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(blockVolume, 'sdCache', env.sdcache)
            mp.setattr(
                image.Image, 'getChain',
                lambda self, sdUUID, imgUUID:
                    [env.subchain.base_vol, env.subchain.top_vol])

            env.make_volume(
                base.virtual * GiB, img_id, base_id,
                vol_format=sc.name2type(base.format),
                prealloc=prealloc,
                vol_type=sc.INTERNAL_VOL)

            env.make_volume(
                top.virtual * GiB, img_id, top_id,
                parent_vol_id=base_id,
                vol_format=sc.COW_FORMAT,
                vol_type=sc.LEAF_VOL if top.leaf else sc.INTERNAL_VOL)

            env.subchain = merge.SubchainInfo(
                dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                     base_id=base_id, top_id=top_id), 0)

            if env_type == 'block':
                # Simulate allocation by adjusting the LV sizes
                env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                                 base.physical * GiB // MiB)
                env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                                 top.physical * GiB // MiB)

            yield env
def make_env(env_type, base, top):
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id, vol_format=sc.COW_FORMAT)

        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes.
            # Use floor division so the size stays an integer number of MB.
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (blockVolume, 'sdCache', env.sdcache),
            (image.Image, 'getChain',
             lambda self, sdUUID, imgUUID:
                 [env.subchain.base_vol, env.subchain.top_vol]),
            (blockVolume.BlockVolume, 'extendSize',
             partial(fake_blockVolume_extendSize, env)),
            (fileVolume.FileVolume, 'extendSize',
             partial(fake_fileVolume_extendSize, env)),
        ]):
            yield env
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    size = MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def make_env(env_type, base, top):
    img_id = make_uuid()
    base_id = make_uuid()
    top_id = make_uuid()

    if env_type == 'block' and base.format == 'raw':
        prealloc = sc.PREALLOCATED_VOL
    else:
        prealloc = sc.SPARSE_VOL

    with fake_env(env_type) as env:
        env.make_volume(base.virtual * GB, img_id, base_id,
                        vol_format=sc.name2type(base.format),
                        prealloc=prealloc)
        env.make_volume(top.virtual * GB, img_id, top_id,
                        parent_vol_id=base_id, vol_format=sc.COW_FORMAT)

        env.subchain = merge.SubchainInfo(
            dict(sd_id=env.sd_manifest.sdUUID, img_id=img_id,
                 base_id=base_id, top_id=top_id), 0)

        if env_type == 'block':
            # Simulate allocation by adjusting the LV sizes
            env.lvm.extendLV(env.sd_manifest.sdUUID, base_id,
                             base.physical * GB // MB)
            env.lvm.extendLV(env.sd_manifest.sdUUID, top_id,
                             top.physical * GB // MB)

        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(blockVolume, 'sdCache', env.sdcache)
            mp.setattr(
                image.Image, 'getChain',
                lambda self, sdUUID, imgUUID:
                    [env.subchain.base_vol, env.subchain.top_vol])
            mp.setattr(
                blockVolume.BlockVolume, 'extendSize',
                partial(fake_blockVolume_extendSize, env))
            mp.setattr(
                fileVolume.FileVolume, 'extendSize',
                partial(fake_fileVolume_extendSize, env))

            yield env
def test_copy_data_collapse(tmpdir, tmp_repo, fake_access, fake_rescan,
                            tmp_db, fake_task, fake_scheduler, monkeypatch,
                            dest_format):
    dom = tmp_repo.create_localfs_domain(name="domain", version=5)

    chain_size = 3
    volumes = create_chain(dom, chain_size)
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())

    length = MEGAB

    # Write some data to each layer
    for i, vol in enumerate(volumes):
        qemuio.write_pattern(vol.getVolumePath(), sc.fmt2str(vol.getFormat()),
                             offset=(i * length))

    # The last volume in the chain is the leaf
    source_leaf_vol = volumes[-1]
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id, dest_format)

    source = dict(endpoint_type='div', sd_id=source_leaf_vol.sdUUID,
                  img_id=source_leaf_vol.imgUUID,
                  vol_id=source_leaf_vol.volUUID)
    dest = dict(endpoint_type='div', sd_id=source_leaf_vol.sdUUID,
                img_id=dest_img_id, vol_id=dest_vol_id)

    # Run copy_data from the source chain to dest_vol, essentially
    # executing qemu-img convert
    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()

    # Verify that the data written to the source chain is available on the
    # collapsed target volume
    for i in range(chain_size):
        qemuio.verify_pattern(dest_vol.getVolumePath(),
                              sc.fmt2str(dest_vol.getFormat()),
                              offset=(i * length))
def make_env(self, sd_type='file', format='raw', chain_len=2, shared=False):
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain

            yield env
def make_env(storage_type, fmt, chain_length=1, size=DEFAULT_SIZE,
             sd_version=5, qcow2_compat='1.1'):
    with fake_env(storage_type, sd_version=sd_version) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (volume_info, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
        ]):
            env.chain = make_qemu_chain(env, size, fmt, chain_length,
                                        qcow2_compat=qcow2_compat)
            yield env
def test_copy_data_illegal(tmpdir, tmp_repo, fake_access, fake_rescan,
                           tmp_db, fake_task, fake_scheduler, monkeypatch,
                           sd_version=5):
    dom = tmp_repo.create_localfs_domain(name="domain", version=sd_version)

    source_img_id = str(uuid.uuid4())
    source_vol_id = str(uuid.uuid4())
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())

    source_vol = create_volume(dom, source_img_id, source_vol_id,
                               volFormat=sc.RAW_FORMAT)
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id,
                             volFormat=sc.COW_FORMAT, legal=False)

    source = dict(endpoint_type='div', sd_id=source_vol.sdUUID,
                  img_id=source_vol.imgUUID, vol_id=source_vol.volUUID)
    dest = dict(endpoint_type='div', sd_id=dest_vol.sdUUID,
                img_id=dest_img_id, vol_id=dest_vol_id)

    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()

    assert jobs.STATUS.DONE == job.status
def make_env(self, sd_type='block', format='raw', chain_len=2):
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        with MonkeyPatch().context() as mp:
            mp.setattr(guarded, 'context', fake_guarded_context())
            mp.setattr(merge, 'sdCache', env.sdcache)
            mp.setattr(blockVolume, 'rm', FakeResourceManager())
            mp.setattr(image, 'Image', FakeImage)

            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
def make_env(self, sd_type='block', format='raw', chain_len=2):
    size = 1048576  # 1 MiB
    base_fmt = sc.name2type(format)
    with fake_env(sd_type) as env:
        rm = FakeResourceManager()
        with MonkeyPatchScope([
            (guarded, 'context', fake_guarded_context()),
            (merge, 'sdCache', env.sdcache),
            (blockVolume, 'rm', rm),
            (image, 'Image', FakeImage),
        ]):
            env.chain = make_qemu_chain(env, size, base_fmt, chain_len)

            volumes = {(vol.imgUUID, vol.volUUID): FakeVolume()
                       for vol in env.chain}
            env.sdcache.domains[env.sd_manifest.sdUUID].volumes = volumes

            def fake_chain(self, sdUUID, imgUUID, volUUID=None):
                return env.chain

            image.Image.getChain = fake_chain
            image.Image.syncVolumeChain = FakeSyncVolumeChain()

            yield env
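# Usage sketch (not part of the test code above, and only an assumption based
# on how these generators are written): each make_env yields a fake storage
# environment and undoes its monkeypatches on exit, so a test would consume it
# as a context manager, e.g. after wrapping it with contextlib.contextmanager.
# The attribute names accessed below (env.chain, env.sd_manifest) come from
# the functions above; the test name itself is hypothetical.
#
#     from contextlib import contextmanager
#
#     make_env = contextmanager(make_env)
#
#     def test_merge_on_fake_chain(self):
#         with self.make_env(sd_type='block', format='cow') as env:
#             base_vol = env.chain[0]    # base of the fake qemu chain
#             leaf_vol = env.chain[-1]   # leaf created by make_qemu_chain
#             # ... exercise merge/copy code against env.sd_manifest here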