def test_volume_operation(self, env_type, error, final_legality, final_status, final_gen):
    """Run one copy job and check final status, legality and generation."""
    job_id = make_uuid()
    fmt = sc.RAW_FORMAT
    with self.make_env(env_type, fmt, fmt) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        # The destination volume starts out legal.
        self.assertEqual(sc.LEGAL_VOL, dst.getLegality())
        source = {
            'endpoint_type': 'div',
            'sd_id': src.sdUUID,
            'img_id': src.imgUUID,
            'vol_id': src.volUUID,
            'generation': 0,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst.sdUUID,
            'img_id': dst.imgUUID,
            'vol_id': dst.volUUID,
            'generation': 0,
        }
        fake_convert = FakeQemuConvertChecker(src, dst, error=error)
        with MonkeyPatchScope([(qemuimg, 'convert', fake_convert)]):
            job = copy_data.Job(job_id, 0, source, dest)
            job.run()
        # The outcome depends on whether the fake convert raised.
        self.assertEqual(final_status, job.status)
        self.assertEqual(final_legality, dst.getLegality())
        self.assertEqual(final_gen, dst.getMetaParam(sc.GENERATION))
def test_abort_during_copy(self, env_type):
    """
    Aborting a running copy must leave the job ABORTED, keep the
    destination volume illegal, and must not bump its generation.
    """
    fmt = sc.RAW_FORMAT
    with self.make_env(env_type, fmt, fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        gen_id = dst_vol.getMetaParam(sc.GENERATION)
        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID,
                      generation=0)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID,
                    generation=gen_id)
        # The fake convert blocks until the job aborts it.
        fake_convert = FakeQemuConvertChecker(src_vol, dst_vol,
                                              wait_for_abort=True)
        with MonkeyPatchScope([(qemuimg, 'convert', fake_convert)]):
            job_id = make_uuid()
            job = copy_data.Job(job_id, 0, source, dest)
            t = start_thread(job.run)
            # Wait until the copy is actually running before aborting.
            if not fake_convert.ready_event.wait(1):
                raise RuntimeError("Timeout waiting for thread")
            job.abort()
            t.join(1)
            # Fix: Thread.isAlive() was removed in Python 3.9;
            # the supported spelling is is_alive().
            if t.is_alive():
                raise RuntimeError("Timeout waiting for thread")
            self.assertEqual(jobs.STATUS.ABORTED, job.status)
            self.assertEqual(sc.ILLEGAL_VOL, dst_vol.getLegality())
            self.assertEqual(gen_id, dst_vol.getMetaParam(sc.GENERATION))
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """Copy a volume within one domain; verify data, locks and job state."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # The destination chain must not verify before the copy ran.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, env.dst_chain)
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        self.assertEqual(sorted(self.expected_locks(src, dst)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(env.dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst.volumePath)['format'])
def test_bad_vm_configuration_volume(self):
    """
    Volumes holding VM configuration could be created with the wrong
    format (raw data inside a COW volume) due to an old bug. Check
    that the workaround in place lets such a volume be copied without
    error.
    """
    job_id = make_uuid()
    vm_conf_size = workarounds.VM_CONF_SIZE_BLK * sc.BLOCK_SIZE
    vm_conf_data = "VM Configuration"
    with self.make_env('file', sc.COW_FORMAT, sc.COW_FORMAT,
                       size=vm_conf_size) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        # Simulate how these "problem" volumes were created in the
        # first place: corrupt the COW volume by writing raw data.
        with open(src.getVolumePath(), "w") as f:
            f.write(vm_conf_data)
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        # The copy must have succeeded; qemu pads the file to a 1k
        # boundary with null bytes, so only check the prefix.
        with open(dst.getVolumePath(), "r") as f:
            self.assertTrue(f.read().startswith(vm_conf_data))
def test_intra_domain_copy(env_type, src_fmt, dst_fmt):
    """Copy a volume within one domain and verify the result (pytest)."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with make_env(env_type, src_fmt, dst_fmt) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Before the copy the destination chain must fail verification.
        with pytest.raises(qemuio.VerificationError):
            verify_qemu_chain(env.dst_chain)
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        assert sorted(expected_locks(src, dst)) == sorted(
            guarded.context.locks)
        assert jobs.STATUS.DONE == job.status
        assert 100.0 == job.progress
        assert 'error' not in job.info()
        verify_qemu_chain(env.dst_chain)
        assert sc.fmt2str(dst_fmt) == qemuimg.info(
            dst.volumePath)['format']
def test_qcow2_compat(self, env_type, qcow2_compat, sd_version):
    """The copy target must end up with the domain's qcow2 compat level."""
    src_fmt = sc.name2type("cow")
    dst_fmt = sc.name2type("cow")
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt, sd_version=sd_version,
                       src_qcow2_compat=qcow2_compat) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        actual_compat = qemuimg.info(dst.volumePath)['compat']
        self.assertEqual(actual_compat, env.sd_manifest.qcow2_compat())
def test_copy_bitmaps_fail_raw_format(user_mount, fake_scheduler, env_type, dst_fmt):
    """Requesting bitmap copy with a raw destination must fail the job."""
    job_id = make_uuid()
    data_center = os.path.join(user_mount.path, "data-center")
    with make_env(env_type, sc.COW_FORMAT, dst_fmt, sd_version=5,
                  src_qcow2_compat='1.1', data_center=data_center) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        # Give the source a bitmap so there is something to copy.
        op = qemuimg.bitmap_add(src.getVolumePath(), 'bitmap')
        op.run()
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest, copy_bitmaps=True)
        job.run()
        # copy bitmaps are not supported for raw volumes
        assert jobs.STATUS.FAILED == job.status
        assert 'error' in job.info()
def test_preallocated_file_volume_copy(self):
    """A preallocated file destination stays fully allocated after copy."""
    job_id = make_uuid()
    with self.make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                       prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        # Full allocation means virtual and actual sizes match.
        virtual_size = qemuimg.info(dst.volumePath)['virtualsize']
        actual_size = qemuimg.info(dst.volumePath)['actualsize']
        self.assertEqual(virtual_size, actual_size)
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """Copy chain members one at a time in the order given by copy_seq."""
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.make_env(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as env:
        write_qemu_chain(env.src_chain)
        for index in copy_seq:
            job_id = make_uuid()
            src = env.src_chain[index]
            dst = env.dst_chain[index]
            source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                      'img_id': src.imgUUID, 'vol_id': src.volUUID}
            dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                    'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
            job = copy_data.Job(job_id, 0, source, dest)
            job.run()
            wait_for_job(job)
            self.assertEqual(sorted(self.expected_locks(src, dst)),
                             sorted(guarded.context.locks))
        # Once every member was copied the whole chain must verify.
        verify_qemu_chain(env.dst_chain)
def test_copy_data_collapse(tmpdir, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task, fake_scheduler, monkeypatch, dest_format, sd_version):
    """Collapsing a 3-volume chain keeps the data and shrinks allocation."""
    dom = tmp_repo.create_localfs_domain(name="domain", version=sd_version)
    chain_size = 3
    volumes = create_chain(dom, chain_size)
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())
    length = MiB
    # Put a distinct pattern in every layer so the collapse is observable.
    for i, vol in enumerate(volumes):
        qemuio.write_pattern(vol.getVolumePath(),
                             sc.fmt2str(vol.getFormat()),
                             offset=(i * length))
    # The last volume in the chain is the leaf we copy from.
    leaf = volumes[-1]
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id,
                             volFormat=dest_format)
    source = {'endpoint_type': 'div', 'sd_id': leaf.sdUUID,
              'img_id': leaf.imgUUID, 'vol_id': leaf.volUUID}
    dest = {'endpoint_type': 'div', 'sd_id': leaf.sdUUID,
            'img_id': dest_img_id, 'vol_id': dest_vol_id}
    # copy_data from the leaf is essentially a qemu-img convert of the
    # whole chain.
    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()
    # Source chain and destination image must hold the same data, but
    # allocation may differ.
    op = qemuimg.compare(leaf.getVolumePath(),
                         dest_vol.getVolumePath(),
                         img1_format='qcow2',
                         img2_format=sc.fmt2str(dest_format),
                         strict=False)
    op.run()
    # Destination actual size should be smaller than the chain's total
    # actual size: one qcow2 header (qcow2), or no header (raw).
    src_actual_size = sum(qemuimg.info(vol.getVolumePath())["actualsize"]
                          for vol in volumes)
    dst_actual_size = qemuimg.info(dest_vol.getVolumePath())["actualsize"]
    assert dst_actual_size < src_actual_size
def test_volume_chain_copy_with_bitmaps(user_mount, fake_scheduler, env_type, sd_version, copy_seq):
    """Bitmaps on each source chain member must survive the copy."""
    bitmaps = ['bitmap1', 'bitmap2']
    data_center = os.path.join(user_mount.path, "data-center")
    with make_env(env_type, sc.COW_FORMAT, sc.COW_FORMAT,
                  chain_length=len(copy_seq), sd_version=sd_version,
                  src_qcow2_compat='1.1',
                  data_center=data_center) as env:
        for index in copy_seq:
            # Add bitmaps to src volume
            vol_path = env.src_chain[index].getVolumePath()
            for bitmap in bitmaps:
                op = qemuimg.bitmap_add(vol_path, bitmap)
                op.run()
            job_id = make_uuid()
            src = env.src_chain[index]
            dst = env.dst_chain[index]
            source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                      'img_id': src.imgUUID, 'vol_id': src.volUUID}
            dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                    'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
            job = copy_data.Job(job_id, 0, source, dest,
                                copy_bitmaps=True)
            job.run()
        # Each copied member must expose both bitmaps, auto-enabled with
        # the default 64 KiB granularity.
        expected = [
            {"flags": ["auto"], "name": name, "granularity": 65536}
            for name in bitmaps
        ]
        for index in copy_seq:
            dst = env.dst_chain[index]
            info = qemuimg.info(dst.getVolumePath())
            assert info["format-specific"]["data"]["bitmaps"] == expected
def test_wrong_generation(self):
    """A destination generation mismatch fails the job and changes nothing."""
    fmt = sc.RAW_FORMAT
    with self.make_env('block', fmt, fmt) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        generation = dst.getMetaParam(sc.GENERATION)
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID,
                  'generation': 0}
        # Deliberately request a generation the volume does not have.
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID,
                'generation': generation + 1}
        job_id = make_uuid()
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(se.GenerationMismatch.code, job.error.code)
        # The destination volume must be left untouched.
        self.assertEqual(sc.LEGAL_VOL, dst.getLegality())
        self.assertEqual(generation, dst.getMetaParam(sc.GENERATION))
def test_copy_data_collapse(tmpdir, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task, fake_scheduler, monkeypatch, dest_format):
    """Collapsing copy of a chain preserves every layer's data pattern."""
    dom = tmp_repo.create_localfs_domain(name="domain", version=5)
    chain_size = 3
    volumes = create_chain(dom, chain_size)
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())
    length = MEGAB
    # Write a distinct pattern into each layer of the chain.
    for i, vol in enumerate(volumes):
        qemuio.write_pattern(vol.getVolumePath(),
                             sc.fmt2str(vol.getFormat()),
                             offset=(i * length))
    # The last volume in the chain is the leaf we copy from.
    leaf = volumes[-1]
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id, dest_format)
    source = {'endpoint_type': 'div', 'sd_id': leaf.sdUUID,
              'img_id': leaf.imgUUID, 'vol_id': leaf.volUUID}
    dest = {'endpoint_type': 'div', 'sd_id': leaf.sdUUID,
            'img_id': dest_img_id, 'vol_id': dest_vol_id}
    # copy_data from the leaf is essentially a qemu-img convert of the
    # whole chain.
    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()
    # Every pattern written to the source chain must be readable on the
    # collapsed target volume.
    for i in range(chain_size):
        qemuio.verify_pattern(dest_vol.getVolumePath(),
                              sc.fmt2str(dest_vol.getFormat()),
                              offset=(i * length))
def test_copy_data_illegal(tmpdir, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task, fake_scheduler, monkeypatch, sd_version=5):
    """Copying onto an ILLEGAL destination volume still completes."""
    dom = tmp_repo.create_localfs_domain(name="domain", version=sd_version)
    source_img_id = str(uuid.uuid4())
    source_vol_id = str(uuid.uuid4())
    dest_img_id = str(uuid.uuid4())
    dest_vol_id = str(uuid.uuid4())
    source_vol = create_volume(dom, source_img_id, source_vol_id,
                               volFormat=sc.RAW_FORMAT)
    # The destination is created illegal on purpose.
    dest_vol = create_volume(dom, dest_img_id, dest_vol_id,
                             volFormat=sc.COW_FORMAT, legal=False)
    source = {'endpoint_type': 'div', 'sd_id': source_vol.sdUUID,
              'img_id': source_vol.imgUUID, 'vol_id': source_vol.volUUID}
    dest = {'endpoint_type': 'div', 'sd_id': dest_vol.sdUUID,
            'img_id': dest_img_id, 'vol_id': dest_vol_id}
    job = copy_data.Job(str(uuid.uuid4()), 0, source, dest)
    monkeypatch.setattr(guarded, 'context', fake_guarded_context())
    job.run()
    assert jobs.STATUS.DONE == job.status
def test_qcow2_compat(user_mount, fake_scheduler, qcow2_compat, sd_version):
    """Copy target gets the domain's qcow2 compat; images stay identical."""
    src_fmt = sc.name2type("cow")
    dst_fmt = sc.name2type("cow")
    job_id = make_uuid()
    data_center = os.path.join(user_mount.path, "data-center")
    with make_env("file", src_fmt, dst_fmt, sd_version=sd_version,
                  src_qcow2_compat=qcow2_compat,
                  data_center=data_center) as env:
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        dst_info = qemuimg.info(dst.volumePath)
        actual_compat = dst_info['format-specific']['data']['compat']
        assert actual_compat == env.sd_manifest.qcow2_compat()
        # After the copy, images must be exactly the same.
        op = qemuimg.compare(
            src.getVolumePath(),
            dst.getVolumePath(),
            img1_format='qcow2',
            img2_format='qcow2',
            strict=True,
        )
        op.run()
def test_copy_to_preallocated_file():
    """A preallocated raw file stays fully allocated after the copy."""
    job_id = make_uuid()
    with make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                  prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src = env.src_chain[0]
        dst = env.dst_chain[0]
        source = {'endpoint_type': 'div', 'sd_id': src.sdUUID,
                  'img_id': src.imgUUID, 'vol_id': src.volUUID}
        dest = {'endpoint_type': 'div', 'sd_id': dst.sdUUID,
                'img_id': dst.imgUUID, 'vol_id': dst.volUUID}
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        # Full allocation means virtual and actual sizes are equal.
        info = qemuimg.info(dst.volumePath)
        assert info["virtual-size"] == info["actual-size"]