def test_subchain_validation(self):
    # Job.run() must invoke subchain.validate() before doing any merge
    # work: a validation failure has to fail the job and leave both the
    # chain contents and the base volume generation untouched.
    job_uuid = make_uuid()
    with self.make_env(sd_type='file', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_idx, top_idx = 0, 1
        base_vol = env.chain[base_idx]
        top_vol = env.chain[top_idx]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        subchain = merge.SubchainInfo(
            {
                'sd_id': top_vol.sdUUID,
                'img_id': top_vol.imgUUID,
                'base_id': base_vol.imgUUID,
                'top_id': top_vol.volUUID,
                'base_generation': 0,
            },
            0)

        def fail():
            raise se.VolumeIsNotInChain(None, None, None)

        # Subchain validation itself is covered by its own tests; here
        # we only check that this job actually calls it.
        subchain.validate = fail
        job = api_merge.Job(job_uuid, subchain)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.VolumeIsNotInChain)

        # validate() must run *before* any merge attempt, so the base
        # volume must still hold its original pattern and generation.
        offset = base_idx * 1024
        pattern = 0xf0 + base_idx
        verify_pattern(base_vol.volumePath, qemuimg.FORMAT.RAW,
                       offset=offset, len=1024, pattern=pattern)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    # Copy a single volume between two images on the same domain and
    # check the taken locks, the job state, and the destination format.
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Before the copy, the destination chain must not verify.
        self.assertRaises(qemuio.VerificationError,
                          verify_qemu_chain, env.dst_chain)

        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()

        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(env.dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    # Copy a single volume between two images on the same domain,
    # wait for the job, then check locks, job state and format.
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Before the copy, the destination chain must not verify.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, env.dst_chain)

        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)

        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(env.dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_preallocated_file_volume_copy(self):
    # Copying into a preallocated raw file volume must produce a fully
    # allocated destination: actual size equals virtual size.
    job_id = make_uuid()
    with self.make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                       prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        self.assertEqual(
            qemuimg.info(dst_vol.volumePath)['virtualsize'],
            qemuimg.info(dst_vol.volumePath)['actualsize'])
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    # Copy the volumes of a chain one at a time, in the order given by
    # copy_seq, then verify the complete destination chain.
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.make_env(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as env:
        write_qemu_chain(env.src_chain)
        for idx in copy_seq:
            src_vol = env.src_chain[idx]
            dst_vol = env.dst_chain[idx]
            source = {
                'endpoint_type': 'div',
                'sd_id': src_vol.sdUUID,
                'img_id': src_vol.imgUUID,
                'vol_id': src_vol.volUUID,
            }
            dest = {
                'endpoint_type': 'div',
                'sd_id': dst_vol.sdUUID,
                'img_id': dst_vol.imgUUID,
                'vol_id': dst_vol.volUUID,
            }
            job = copy_data.Job(make_uuid(), 0, source, dest)
            job.run()
            wait_for_job(job)
            # Each copy must take exactly the expected locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(env.dst_chain)
def test_intra_domain_copy(env_type, src_fmt, dst_fmt):
    # Copy a single volume between two images on the same domain and
    # check the taken locks, the job state, and the destination format.
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Before the copy, the destination chain must not verify.
        with pytest.raises(qemuio.VerificationError):
            verify_qemu_chain(env.dst_chain)

        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()

        assert (sorted(expected_locks(src_vol, dst_vol)) ==
                sorted(guarded.context.locks))
        assert jobs.STATUS.DONE == job.status
        assert 100.0 == job.progress
        assert 'error' not in job.info()
        verify_qemu_chain(env.dst_chain)
        assert (sc.fmt2str(dst_fmt) ==
                qemuimg.info(dst_vol.volumePath)['format'])
def test_merge_subchain_with_bitmaps(
        self, sd_type, chain_len, base_index, top_index):
    # Merging with merge_bitmaps=True must propagate bitmaps from top
    # into base: a bitmap present on both volumes is merged, a bitmap
    # present only on top is added to base and then merged.
    job_id = make_uuid()
    bitmap1_name = 'bitmap1'
    bitmap2_name = 'bitmap2'
    with self.make_env(
            sd_type=sd_type, chain_len=chain_len,
            base_format=sc.COW_FORMAT, qcow2_compat='1.1') as env:
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]

        # bitmap1 exists on both base and top (merge case).
        for vol in (base_vol, top_vol):
            qemuimg.bitmap_add(vol.getVolumePath(), bitmap1_name).run()

        # bitmap2 exists only on top (add + merge case).
        qemuimg.bitmap_add(top_vol.getVolumePath(), bitmap2_name).run()

        # Dirty the bitmaps by writing data through the chain.
        write_qemu_chain(env.chain)

        subchain = merge.SubchainInfo(
            {
                'sd_id': base_vol.sdUUID,
                'img_id': base_vol.imgUUID,
                'base_id': base_vol.volUUID,
                'top_id': top_vol.volUUID,
                'base_generation': 0,
            },
            0)
        job = api_merge.Job(job_id, subchain, merge_bitmaps=True)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.DONE)

        info = qemuimg.info(base_vol.getVolumePath())
        # TODO: we should improve this test by adding a verification to
        # the extents that are reported by qemu-nbd.
        expected = [
            {
                "flags": ["auto"],
                "name": name,
                "granularity": 65536,
            }
            for name in (bitmap1_name, bitmap2_name)
        ]
        assert info['format-specific']['data']['bitmaps'] == expected
def test_pattern_written_to_base_raises(self, storage_type):
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MB, sc.RAW_FORMAT, 3)
        # Write every layer's pattern into the base volume only;
        # chain verification must then fail.
        write_qemu_chain(chain[:1] * 3)
        self.assertRaises(qemuio.VerificationError,
                          verify_qemu_chain, chain)
def test_pattern_written_to_base_raises(storage_type):
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MiB, sc.RAW_FORMAT, 3)
        # Write every layer's pattern into the base volume only;
        # chain verification must then fail.
        write_qemu_chain(chain[:1] * 3)
        with pytest.raises(qemuio.VerificationError):
            verify_qemu_chain(chain)
def test_preallocated_file_volume_copy(self):
    # Copying into a preallocated raw file volume must produce a fully
    # allocated destination: actual size equals virtual size.
    job_id = make_uuid()
    with self.make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                       prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        self.assertEqual(
            qemuimg.info(dst_vol.volumePath)['virtualsize'],
            qemuimg.info(dst_vol.volumePath)['actualsize'])
def test_merge_subchain(self, sd_type, chain_len, base_index, top_index):
    # Merge top into base, then verify the merged data, the released
    # locks, and base legality/generation afterwards.
    job_id = make_uuid()
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain = merge.SubchainInfo(
            {
                'sd_id': base_vol.sdUUID,
                'img_id': base_vol.imgUUID,
                'base_id': base_vol.volUUID,
                'top_id': top_vol.volUUID,
                'base_generation': 0,
            },
            0)
        job = api_merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # Every layer in [base_index, top_index] must now be readable
        # from top, and also from base since top was merged into base.
        for i in range(base_index, top_index + 1):
            offset = i * 1024
            pattern = 0xf0 + i
            verify_pattern(
                top_vol.volumePath,
                qemuimg.FORMAT.QCOW2,
                offset=offset,
                len=1024,
                pattern=pattern)
            verify_pattern(
                base_vol.volumePath,
                sc.fmt2str(base_vol.getFormat()),
                offset=offset,
                len=1024,
                pattern=pattern)

        self.assertEqual(sorted(self.expected_locks(base_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(base_vol.getLegality(), sc.LEGAL_VOL)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 1)
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    # Copy the volumes of a chain one at a time, in the order given by
    # copy_seq, then verify the complete destination chain.
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.make_env(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as env:
        write_qemu_chain(env.src_chain)
        for idx in copy_seq:
            src_vol = env.src_chain[idx]
            dst_vol = env.dst_chain[idx]
            source = {
                'endpoint_type': 'div',
                'sd_id': src_vol.sdUUID,
                'img_id': src_vol.imgUUID,
                'vol_id': src_vol.volUUID,
            }
            dest = {
                'endpoint_type': 'div',
                'sd_id': dst_vol.sdUUID,
                'img_id': dst_vol.imgUUID,
                'vol_id': dst_vol.volUUID,
            }
            job = copy_data.Job(make_uuid(), 0, source, dest)
            job.run()
            # Each copy must take exactly the expected locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(env.dst_chain)
def test_merge_illegal_volume(self, volume):
    # A merge job must fail with prepareIllegalVolumeError when either
    # leg of the subchain (base or top) is marked illegal.
    job_id = make_uuid()
    with self.make_env(sd_type='block', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[0]
        top_vol = env.chain[1]
        # Mark the requested volume illegal.
        target = base_vol if volume == 'base' else top_vol
        target.setLegality(sc.ILLEGAL_VOL)
        subchain = merge.SubchainInfo(
            {
                'sd_id': base_vol.sdUUID,
                'img_id': base_vol.imgUUID,
                'base_id': base_vol.volUUID,
                'top_id': top_vol.volUUID,
                'base_generation': 0,
            },
            0)
        job = api_merge.Job(job_id, subchain)
        job.run()
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.prepareIllegalVolumeError)
def test_copy_to_preallocated_file():
    # Copying into a preallocated raw file must leave the destination
    # fully allocated: actual size equals virtual size.
    job_id = make_uuid()
    with make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                  prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = {
            'endpoint_type': 'div',
            'sd_id': src_vol.sdUUID,
            'img_id': src_vol.imgUUID,
            'vol_id': src_vol.volUUID,
        }
        dest = {
            'endpoint_type': 'div',
            'sd_id': dst_vol.sdUUID,
            'img_id': dst_vol.imgUUID,
            'vol_id': dst_vol.volUUID,
        }
        copy_data.Job(job_id, 0, source, dest).run()
        info = qemuimg.info(dst_vol.volumePath)
        assert info["virtual-size"] == info["actual-size"]
def test_verify_chain(self, storage_type):
    # Sanity: a chain written in order verifies cleanly.
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MB, sc.RAW_FORMAT, 2)
        write_qemu_chain(chain)
        verify_qemu_chain(chain)
def test_reversed_chain_raises(self, storage_type):
    # Writing the patterns to the wrong layers (reversed order) must
    # make chain verification fail.
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MB, sc.RAW_FORMAT, 2)
        write_qemu_chain(reversed(chain))
        self.assertRaises(qemuio.VerificationError,
                          verify_qemu_chain, chain)
def test_reversed_chain_raises(storage_type):
    # Writing the patterns to the wrong layers (reversed order) must
    # make chain verification fail.
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MiB, sc.RAW_FORMAT, 2)
        write_qemu_chain(reversed(chain))
        with pytest.raises(qemuio.VerificationError):
            verify_qemu_chain(chain)
def test_verify_chain(storage_type):
    # Sanity: a chain written in order verifies cleanly.
    with fake_env(storage_type) as env:
        chain = make_qemu_chain(env, MiB, sc.RAW_FORMAT, 2)
        write_qemu_chain(chain)
        verify_qemu_chain(chain)