def test_subchain_validation(self):
    """
    Verify that the merge job fails fast when subchain validation
    raises, leaving the chain data and the base generation untouched.
    """
    job_id = make_uuid()
    with self.make_env(sd_type='file', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_index = 0
        top_index = 1
        base_vol = env.chain[base_index]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        top_vol = env.chain[top_index]
        # base_id is the base *volume* UUID (was mistakenly imgUUID);
        # this matches the other merge tests in this module.  The value
        # is not reached here because validate is mocked to raise.
        subchain_info = dict(sd_id=top_vol.sdUUID,
                             img_id=top_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def fail():
            raise se.VolumeIsNotInChain(None, None, None)

        # We already tested that subchain validate does the right thing,
        # here we test that this job cares to call subchain validate.
        subchain.validate = fail
        job = api_merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.VolumeIsNotInChain)

        # Check that validate is called *before* attempting the merge -
        # verify that the chain data was *not* merged.
        offset = base_index * 1024
        pattern = 0xf0 + base_index
        verify_pattern(base_vol.volumePath, qemuimg.FORMAT.RAW,
                       offset=offset, len=1024, pattern=pattern)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """
    Copy a single volume between two chains on the same storage domain
    and verify the data, the taken locks, the job status and the
    resulting image format.
    """
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.get_vols(env_type, src_fmt, dst_fmt) as (src_chain,
                                                       dst_chain):
        src_vol = src_chain[0]
        dst_vol = dst_chain[0]
        write_qemu_chain(src_chain)
        # Sanity check: the destination chain must not hold the data yet.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        # The job must have taken exactly the expected guarded locks.
        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """
    Copy the volumes of a chain one at a time, in the order given by
    copy_seq, and verify that the destination chain ends up complete.
    """
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.get_vols(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as (src_chain, dst_chain):
        write_qemu_chain(src_chain)
        for index in copy_seq:
            # Each volume copy is a separate job with its own id.
            job_id = make_uuid()
            src_vol = src_chain[index]
            dst_vol = dst_chain[index]
            source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                          img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
            dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                        img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
            job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
            job.run()
            wait_for_job(job)
            # Every job must take exactly the expected guarded locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(dst_chain)
def test_bad_vm_configuration_volume(self):
    """
    When copying a volume containing VM configuration information the
    volume format may be set incorrectly due to an old bug.  Check that
    the workaround we have in place allows the copy to proceed without
    error.
    """
    job_id = make_uuid()
    vm_conf_size = workarounds.VM_CONF_SIZE_BLK * sc.BLOCK_SIZE
    vm_conf_data = "VM Configuration"
    with self.make_env('file', sc.COW_FORMAT, sc.COW_FORMAT,
                       size=vm_conf_size) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]

        # Corrupt the COW volume by writing raw data.  This simulates
        # how these "problem" volumes were created in the first place.
        with open(src_vol.getVolumePath(), "w") as f:
            f.write(vm_conf_data)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)

        # Verify that the copy succeeded.
        with open(dst_vol.getVolumePath(), "r") as f:
            # Qemu pads the file to a 1k boundary with null bytes, so
            # only check the prefix.
            self.assertTrue(f.read().startswith(vm_conf_data))
def test_qcow2_compat(self, env_type, qcow2_compat, sd_version):
    """
    Copy a qcow2 volume and verify that the destination image gets the
    qcow2 compat level reported by the destination domain manifest,
    regardless of the source compat level.
    """
    src_fmt = sc.name2type("cow")
    dst_fmt = sc.name2type("cow")
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt, sd_version=sd_version,
                       src_qcow2_compat=qcow2_compat) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        actual_compat = qemuimg.info(dst_vol.volumePath)['compat']
        self.assertEqual(actual_compat, env.sd_manifest.qcow2_compat())
def test_preallocated_file_volume_copy(self):
    """
    Copy a preallocated raw file volume and verify that the destination
    is fully allocated (virtual size equals actual size).
    """
    job_id = make_uuid()
    with self.make_env('file', sc.RAW_FORMAT, sc.RAW_FORMAT,
                       prealloc=sc.PREALLOCATED_VOL) as env:
        write_qemu_chain(env.src_chain)
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        # Preallocation must be preserved by the copy.
        self.assertEqual(
            qemuimg.info(dst_vol.volumePath)['virtualsize'],
            qemuimg.info(dst_vol.volumePath)['actualsize'])
def test_teardown_failure(self):
    """
    Even when tearing down one image fails, all prepared images must
    still be torn down (in reverse order of preparation) and the job
    must finish FAILED.
    """
    job_id = make_uuid()
    sp_id = make_uuid()
    sd_id = make_uuid()
    img0_id = make_uuid()
    # NOTE(review): presumably this image id makes FakeIRS raise during
    # teardownImage - confirm against the FakeIRS definition.
    img1_id = TEARDOWN_ERROR_IMAGE_ID
    vol0_id = make_uuid()
    vol1_id = make_uuid()
    images = [
        {'sd_id': sd_id, 'img_id': img0_id, 'vol_id': vol0_id},
        {'sd_id': sd_id, 'img_id': img1_id, 'vol_id': vol1_id},
    ]
    # Images are prepared in order and torn down in reverse order.
    expected = [
        ('prepareImage', (sd_id, sp_id, img0_id, vol0_id),
         {'allowIllegal': True}),
        ('prepareImage', (sd_id, sp_id, img1_id, vol1_id),
         {'allowIllegal': True}),
        ('teardownImage', (sd_id, sp_id, img1_id), {}),
        ('teardownImage', (sd_id, sp_id, img0_id), {}),
    ]
    with namedTemporaryDir() as base:
        irs = FakeIRS(base)
        job = seal.Job(job_id, sp_id, images, irs)
        # Keep the finished job around so its status can be inspected.
        job.autodelete = False
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(expected, irs.__calls__)
def test_subchain_validation(self):
    """
    Verify that the merge job fails fast when subchain validation
    raises, leaving the chain data and the base generation untouched.
    """
    job_id = make_uuid()
    with self.make_env(sd_type='file', chain_len=2) as env:
        write_qemu_chain(env.chain)
        base_index = 0
        top_index = 1
        base_vol = env.chain[base_index]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        top_vol = env.chain[top_index]
        # base_id is the base *volume* UUID (was mistakenly imgUUID);
        # this matches the other merge tests in this module.  The value
        # is not reached here because validate is mocked to raise.
        subchain_info = dict(sd_id=top_vol.sdUUID,
                             img_id=top_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)

        def fail():
            raise se.VolumeIsNotInChain(None, None, None)

        # We already tested that subchain validate does the right thing,
        # here we test that this job cares to call subchain validate.
        subchain.validate = fail
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.VolumeIsNotInChain)

        # Check that validate is called *before* attempting the merge -
        # verify that the chain data was *not* merged.
        offset = base_index * 1024
        pattern = 0xf0 + base_index
        qemu_pattern_verify(base_vol.volumePath, qemuimg.FORMAT.RAW,
                            offset=offset, len=1024, pattern=pattern)
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_bad_vm_configuration_volume(self):
    """
    When copying a volume containing VM configuration information the
    volume format may be set incorrectly due to an old bug.  Check that
    the workaround we have in place allows the copy to proceed without
    error.
    """
    job_id = make_uuid()
    vm_conf_size = workarounds.VM_CONF_SIZE_BLK * sc.BLOCK_SIZE
    vm_conf_data = "VM Configuration"
    with self.make_env('file', sc.COW_FORMAT, sc.COW_FORMAT,
                       size=vm_conf_size) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]

        # Corrupt the COW volume by writing raw data.  This simulates
        # how these "problem" volumes were created in the first place.
        with open(src_vol.getVolumePath(), "w") as f:
            f.write(vm_conf_data)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)

        # Verify that the copy succeeded.
        with open(dst_vol.getVolumePath(), "r") as f:
            # Qemu pads the file to a 1k boundary with null bytes, so
            # only check the prefix.
            self.assertTrue(f.read().startswith(vm_conf_data))
def test_intra_domain_copy(self, env_type, src_fmt, dst_fmt):
    """
    Copy a single volume between the source and destination chains of
    the same environment and verify the data, the taken locks, the job
    status and the resulting image format.
    """
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    job_id = make_uuid()
    with self.make_env(env_type, src_fmt, dst_fmt) as env:
        src_vol = env.src_chain[0]
        dst_vol = env.dst_chain[0]
        write_qemu_chain(env.src_chain)
        # Sanity check: the destination chain must not hold the data yet.
        self.assertRaises(ChainVerificationError,
                          verify_qemu_chain, env.dst_chain)

        source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                      img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
        dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                    img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
        job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
        job.run()
        wait_for_job(job)
        # The job must have taken exactly the expected guarded locks.
        self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(100.0, job.progress)
        self.assertNotIn('error', job.info())
        verify_qemu_chain(env.dst_chain)
        self.assertEqual(sc.fmt2str(dst_fmt),
                         qemuimg.info(dst_vol.volumePath)['format'])
def test_set_generation(self, env_type):
    """Updating the generation metadata succeeds and is persisted."""
    with self.make_env(env_type) as env:
        volume = env.chain[0]
        endpoint = make_endpoint_from_volume(volume)
        vol_attr = dict(generation=44)
        job = update_volume.Job(make_uuid(), 0, endpoint, vol_attr)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(44, volume.getMetaParam(sc.GENERATION))
def run_job(self, storage_type, cur_gen):
    """
    Run a set_volume_generation job against a fresh volume whose
    current generation is cur_gen, and return the finished job.
    """
    with self.get_vol(storage_type) as vol:
        vol_info = dict(endpoint_type='div',
                        sd_id=vol.sdUUID,
                        img_id=vol.imgUUID,
                        vol_id=vol.volUUID,
                        generation=cur_gen)
        job_id = make_uuid()
        job = storage.sdm.api.set_volume_generation.Job(
            job_id, 0, vol_info, 1)
        job.run()
        wait_for_job(job)
        return job
def test_create_volume_domainlock_contended(self):
    """
    The create_volume job fails with AcquireLockFailure when the
    domain lock cannot be acquired.
    """
    def fail_to_lock(*args):
        raise se.AcquireLockFailure('id', 'rc', 'out', 'err')

    args = self._get_args()
    # Simulate a contended domain lock.
    args['sd_manifest'].acquireDomainLock = fail_to_lock
    job = storage.sdm.api.create_volume.Job(**args)
    job.run()
    wait_for_job(job)
    self.assertEqual(jobs.STATUS.FAILED, job.status)
    self.assertEqual(se.AcquireLockFailure.code,
                     job.info()['error']['code'])
def test_set_legality_invalid(self, env_type, legality):
    """
    Setting the volume legality to the value it already has must fail
    with InvalidVolumeUpdate and leave the generation untouched.
    """
    with self.make_env(env_type) as env:
        vol = env.chain[0]
        vol.setLegality(legality)
        generation = vol.getMetaParam(sc.GENERATION)
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(vol),
                                dict(legality=legality))
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.InvalidVolumeUpdate)
        # A failed update must not bump the generation.
        self.assertEqual(generation, vol.getMetaParam(sc.GENERATION))
def test_set_description(self, env_type):
    """Updating the description succeeds and bumps the generation."""
    with self.make_env(env_type) as env:
        vol = env.chain[0]
        generation = vol.getMetaParam(sc.GENERATION)
        description = 'my wonderful description'
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(vol),
                                dict(description=description))
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(description, vol.getMetaParam(sc.DESCRIPTION))
        # A successful update bumps the generation by one.
        self.assertEqual(generation + 1, vol.getMetaParam(sc.GENERATION))
def test_set_legality(self, env_type, legality, expected):
    """
    Changing the volume legality from `legality` to `expected`
    succeeds and bumps the generation.
    """
    with self.make_env(env_type) as env:
        vol = env.chain[0]
        vol.setLegality(legality)
        generation = vol.getMetaParam(sc.GENERATION)
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(vol),
                                dict(legality=expected))
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(expected, vol.getMetaParam(sc.LEGALITY))
        # A successful update bumps the generation by one.
        self.assertEqual(generation + 1, vol.getMetaParam(sc.GENERATION))
def test_job(self):
    """
    A successful seal job prepares all images in order, processes them
    (the fake virt-sysprep leaves a marker file), and tears them down
    in reverse order.
    """
    job_id = make_uuid()
    sp_id = make_uuid()
    sd_id = make_uuid()
    img0_id = make_uuid()
    img1_id = make_uuid()
    vol0_id = make_uuid()
    vol1_id = make_uuid()
    images = [
        {'sd_id': sd_id, 'img_id': img0_id, 'vol_id': vol0_id},
        {'sd_id': sd_id, 'img_id': img1_id, 'vol_id': vol1_id},
    ]
    # Images are prepared in order and torn down in reverse order.
    expected = [
        ('prepareImage', (sd_id, sp_id, img0_id, vol0_id),
         {'allowIllegal': True}),
        ('prepareImage', (sd_id, sp_id, img1_id, vol1_id),
         {'allowIllegal': True}),
        ('teardownImage', (sd_id, sp_id, img1_id), {}),
        ('teardownImage', (sd_id, sp_id, img0_id), {}),
    ]
    with namedTemporaryDir() as base:
        irs = FakeIRS(base)
        job = seal.Job(job_id, sp_id, images, irs)
        # Keep the finished job around so its status can be inspected.
        job.autodelete = False
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(expected, irs.__calls__)
        # Every image should carry the marker written by the fake
        # virt-sysprep script.
        for image in images:
            resultpath = _vol_path(base, image['sd_id'], sp_id,
                                   image['img_id'], ext='.res')
            with open(resultpath) as f:
                data = f.read()
                self.assertEqual(data, 'fake-virt-sysprep was here')
def test_set_type_leaf_with_parent(self, env_type):
    """
    A leaf volume that has a parent must not be made SHARED; the job
    fails with InvalidVolumeUpdate and the metadata is unchanged.
    """
    with self.make_env(env_type, chain_length=2) as env:
        top_vol = env.chain[1]
        generation = top_vol.getMetaParam(sc.GENERATION)
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(top_vol),
                                dict(type=sc.type2name(sc.SHARED_VOL)))
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.InvalidVolumeUpdate)
        # Volume type and generation must be left untouched.
        self.assertEqual(sc.type2name(sc.LEAF_VOL),
                         top_vol.getMetaParam(sc.VOLTYPE))
        self.assertEqual(generation, top_vol.getMetaParam(sc.GENERATION))
def test_set_type_internal(self, env_type):
    """
    An INTERNAL volume must not be made SHARED; the job fails with
    InvalidVolumeUpdate and the generation is unchanged.
    """
    with self.make_env(env_type, chain_length=1) as env:
        internal_vol = env.chain[0]
        generation = internal_vol.getMetaParam(sc.GENERATION)
        internal_vol.setInternal()
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(internal_vol),
                                dict(type=sc.type2name(sc.SHARED_VOL)))
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.InvalidVolumeUpdate)
        # A failed update must not bump the generation.
        self.assertEqual(generation,
                         internal_vol.getMetaParam(sc.GENERATION))
def test_set_type(self, env_type):
    """
    A leaf volume can be updated to SHARED; the update succeeds and
    bumps the generation.
    """
    with self.make_env(env_type) as env:
        leaf_vol = env.chain[0]
        generation = leaf_vol.getMetaParam(sc.GENERATION)
        job = update_volume.Job(make_uuid(), 0,
                                make_endpoint_from_volume(leaf_vol),
                                dict(type=sc.type2name(sc.SHARED_VOL)))
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(sc.type2name(sc.SHARED_VOL),
                         leaf_vol.getMetaParam(sc.VOLTYPE))
        # A successful update bumps the generation by one.
        self.assertEqual(generation + 1,
                         leaf_vol.getMetaParam(sc.GENERATION))
def test_vol_type_not_qcow(self, env_type):
    """
    Amending the qcow2 compat level of a RAW volume must fail and
    leave the volume legality and generation unchanged.
    """
    fmt = sc.name2type('raw')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=4) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID)
        vol_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, vol_attr)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(type(job.error), se.GeneralException)
        # The failed job must leave the volume untouched.
        self.assertEqual(sc.LEGAL_VOL, env_vol.getLegality())
        self.assertEqual(generation, env_vol.getMetaParam(sc.GENERATION))
def test_amend(self, env_type):
    """
    Amending a compat 0.10 qcow2 volume to compat 1.1 succeeds and
    bumps the generation.
    """
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=4,
                       qcow2_compat='0.10') as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        # Sanity check: the volume starts at compat 0.10.
        self.assertEqual('0.10', env_vol.getQemuImageInfo()['compat'])
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID)
        vol_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, vol_attr)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual('1.1', env_vol.getQemuImageInfo()['compat'])
        # A successful amend bumps the generation by one.
        self.assertEqual(generation + 1,
                         env_vol.getMetaParam(sc.GENERATION))
def test_merge_subchain(self, sd_type, chain_len, base_index, top_index):
    """
    Merge top into base and verify the merged data, the taken locks,
    the base legality and the bumped generation.
    """
    job_id = make_uuid()
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[base_index]
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)
        job = api_merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # Verify that the chain data was merged.  Each volume i was
        # written with its own pattern at its own 1k offset.
        for i in range(base_index, top_index + 1):
            offset = i * 1024
            pattern = 0xf0 + i
            # We expect to read all data from top...
            verify_pattern(
                top_vol.volumePath, qemuimg.FORMAT.QCOW2,
                offset=offset, len=1024, pattern=pattern)
            # ...and from base, since top was merged into base.
            verify_pattern(
                base_vol.volumePath, sc.fmt2str(base_vol.getFormat()),
                offset=offset, len=1024, pattern=pattern)

        self.assertEqual(sorted(self.expected_locks(base_vol)),
                         sorted(guarded.context.locks))
        self.assertEqual(base_vol.getLegality(), sc.LEGAL_VOL)
        # A successful merge bumps the base generation.
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 1)
def test_sd_version_no_support_compat(self, env_type):
    """
    Amending the qcow2 compat level on a version-3 storage domain must
    fail, leaving the volume legality and generation unchanged.
    """
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=3) as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(type(job.error), se.GeneralException)
        # The failed job must leave the volume untouched.
        self.assertEqual(sc.LEGAL_VOL, env_vol.getLegality())
        self.assertEqual(generation, env_vol.getMetaParam(sc.GENERATION))
def test_teardown_failure(self):
    """
    Even when tearing down one image fails, all prepared images must
    still be torn down (in reverse order of preparation) and the job
    must finish FAILED.
    """
    job_id = make_uuid()
    sp_id = make_uuid()
    sd_id = make_uuid()
    img0_id = make_uuid()
    # NOTE(review): presumably this image id makes FakeIRS raise during
    # teardownImage - confirm against the FakeIRS definition.
    img1_id = TEARDOWN_ERROR_IMAGE_ID
    vol0_id = make_uuid()
    vol1_id = make_uuid()
    images = [
        {
            'sd_id': sd_id,
            'img_id': img0_id,
            'vol_id': vol0_id
        },
        {
            'sd_id': sd_id,
            'img_id': img1_id,
            'vol_id': vol1_id
        },
    ]
    # Images are prepared in order and torn down in reverse order.
    expected = [
        ('prepareImage', (sd_id, sp_id, img0_id, vol0_id), {
            'allowIllegal': True
        }),
        ('prepareImage', (sd_id, sp_id, img1_id, vol1_id), {
            'allowIllegal': True
        }),
        ('teardownImage', (sd_id, sp_id, img1_id), {}),
        ('teardownImage', (sd_id, sp_id, img0_id), {}),
    ]
    with namedTemporaryDir() as base:
        irs = FakeIRS(base)
        job = seal.Job(job_id, sp_id, images, irs)
        # Keep the finished job around so its status can be inspected.
        job.autodelete = False
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.FAILED, job.status)
        self.assertEqual(expected, irs.__calls__)
def test_merge_legal_base(self):
    """
    A merge job must fail with UnexpectedVolumeState when the base
    volume is LEGAL, leaving the base generation unchanged.
    """
    job_id = make_uuid()
    with self.make_env(sd_type='file', chain_len=3) as env:
        base_vol = env.chain[0]
        # NOTE(review): the merge step appears to expect a base that was
        # marked ILLEGAL by an earlier step of the flow; a LEGAL base is
        # an unexpected state - confirm against the merge job code.
        base_vol.setLegality(sc.LEGAL_VOL)
        top_vol = env.chain[1]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.FAILED)
        self.assertEqual(type(job.error), se.UnexpectedVolumeState)
        # A failed merge must not bump the generation.
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 0)
def test_amend(self, env_type):
    """
    Amending a compat 0.10 qcow2 volume to compat 1.1 succeeds and
    bumps the generation.
    """
    fmt = sc.name2type('cow')
    job_id = make_uuid()
    with self.make_env(env_type, fmt, sd_version=4,
                       qcow2_compat='0.10') as env:
        env_vol = env.chain[0]
        generation = env_vol.getMetaParam(sc.GENERATION)
        # Sanity check: the volume starts at compat 0.10.
        self.assertEqual('0.10', env_vol.getQemuImageInfo()['compat'])
        vol = dict(endpoint_type='div', sd_id=env_vol.sdUUID,
                   img_id=env_vol.imgUUID, vol_id=env_vol.volUUID,
                   generation=generation)
        qcow2_attr = dict(compat='1.1')
        job = amend_volume.Job(job_id, 0, vol, qcow2_attr)
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual('1.1', env_vol.getQemuImageInfo()['compat'])
        # A successful amend bumps the generation by one.
        self.assertEqual(generation + 1,
                         env_vol.getMetaParam(sc.GENERATION))
def test_volume_chain_copy(self, env_type, src_fmt, dst_fmt, copy_seq):
    """
    Copy the volumes of a chain one at a time, in the order given by
    copy_seq, and verify that the destination chain ends up complete.
    """
    src_fmt = sc.name2type(src_fmt)
    dst_fmt = sc.name2type(dst_fmt)
    nr_vols = len(copy_seq)
    with self.make_env(env_type, src_fmt, dst_fmt,
                       chain_length=nr_vols) as env:
        write_qemu_chain(env.src_chain)
        for index in copy_seq:
            # Each volume copy is a separate job with its own id.
            job_id = make_uuid()
            src_vol = env.src_chain[index]
            dst_vol = env.dst_chain[index]
            source = dict(endpoint_type='div', sd_id=src_vol.sdUUID,
                          img_id=src_vol.imgUUID, vol_id=src_vol.volUUID)
            dest = dict(endpoint_type='div', sd_id=dst_vol.sdUUID,
                        img_id=dst_vol.imgUUID, vol_id=dst_vol.volUUID)
            job = storage.sdm.api.copy_data.Job(job_id, 0, source, dest)
            job.run()
            wait_for_job(job)
            # Every job must take exactly the expected guarded locks.
            self.assertEqual(sorted(self.expected_locks(src_vol, dst_vol)),
                             sorted(guarded.context.locks))
        verify_qemu_chain(env.dst_chain)
def test_create_volume(self):
    """
    A create_volume job completes successfully, acquiring and releasing
    the domain lock and the exclusive image resource.
    """
    args = self._get_args()
    job = storage.sdm.api.create_volume.Job(**args)
    with self._fake_env():
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertIsNone(job.progress)
        self.assertNotIn('error', job.info())
        # Verify that the domain lock was acquired and released.
        self.assertEqual([('acquireDomainLock', (1,), {}),
                          ('releaseDomainLock', (), {})],
                         args['sd_manifest'].__calls__)
        # Verify that the image resource was locked and released.
        image_ns = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                   job.sd_manifest.sdUUID)
        rm_args = (image_ns, job.vol_info.img_id, rm.EXCLUSIVE)
        self.assertEqual([('acquireResource', rm_args, {}),
                          ('releaseResource', rm_args, {})],
                         self.rm.__calls__)
def test_create_volume(self):
    """
    A create_volume job completes successfully, acquiring and releasing
    the domain lock and the exclusive image resource.
    """
    args = self._get_args()
    job = storage.sdm.api.create_volume.Job(**args)
    with self._fake_env():
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertIsNone(job.progress)
        self.assertNotIn('error', job.info())
        # Verify that the domain lock was acquired and released.
        self.assertEqual([('acquireDomainLock', (1,), {}),
                          ('releaseDomainLock', (), {})],
                         args['sd_manifest'].__calls__)
        # Verify that the image resource was locked and released.
        image_ns = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                   job.sd_manifest.sdUUID)
        rm_args = (image_ns, job.vol_info.img_id, rm.EXCLUSIVE)
        self.assertEqual([('acquireResource', rm_args, {}),
                          ('releaseResource', rm_args, {})],
                         self.rm.__calls__)
def test_job(self):
    """
    A successful seal job prepares all images in order, processes them
    (the fake virt-sysprep leaves a marker file), and tears them down
    in reverse order.
    """
    job_id = make_uuid()
    sp_id = make_uuid()
    sd_id = make_uuid()
    img0_id = make_uuid()
    img1_id = make_uuid()
    vol0_id = make_uuid()
    vol1_id = make_uuid()
    images = [
        {'sd_id': sd_id, 'img_id': img0_id, 'vol_id': vol0_id},
        {'sd_id': sd_id, 'img_id': img1_id, 'vol_id': vol1_id},
    ]
    # Images are prepared in order and torn down in reverse order.
    expected = [
        ('prepareImage', (sd_id, sp_id, img0_id, vol0_id),
         {'allowIllegal': True}),
        ('prepareImage', (sd_id, sp_id, img1_id, vol1_id),
         {'allowIllegal': True}),
        ('teardownImage', (sd_id, sp_id, img1_id), {}),
        ('teardownImage', (sd_id, sp_id, img0_id), {}),
    ]
    with namedTemporaryDir() as base:
        irs = FakeIRS(base)
        job = seal.Job(job_id, sp_id, images, irs)
        # Keep the finished job around so its status can be inspected.
        job.autodelete = False
        job.run()
        wait_for_job(job)
        self.assertEqual(jobs.STATUS.DONE, job.status)
        self.assertEqual(expected, irs.__calls__)
        # Every image should carry the marker written by the fake
        # virt-sysprep script.
        for image in images:
            resultpath = _vol_path(base, image['sd_id'], sp_id,
                                   image['img_id'], ext='.res')
            with open(resultpath) as f:
                data = f.read()
                self.assertEqual(data, 'fake-virt-sysprep was here')
def test_merge_subchain(self, sd_type, chain_len, base_index, top_index):
    """
    Merge top into base and verify the merged data, the taken locks,
    the base legality and the bumped generation.
    """
    job_id = make_uuid()
    with self.make_env(sd_type=sd_type, chain_len=chain_len) as env:
        write_qemu_chain(env.chain)
        base_vol = env.chain[base_index]
        base_vol.setLegality(sc.ILLEGAL_VOL)
        top_vol = env.chain[top_index]
        subchain_info = dict(sd_id=base_vol.sdUUID,
                             img_id=base_vol.imgUUID,
                             base_id=base_vol.volUUID,
                             top_id=top_vol.volUUID,
                             base_generation=0)
        subchain = merge.SubchainInfo(subchain_info, 0)
        job = storage.sdm.api.merge.Job(job_id, subchain)
        job.run()
        wait_for_job(job)
        self.assertEqual(job.status, jobs.STATUS.DONE)

        # The base format is a property of the base volume, not of the
        # loop index: only the first volume of the chain may be raw.
        # (Previously this was computed from the loop variable i, which
        # would wrongly verify a raw base as qcow2 for i > 0.)
        base_format = (qemuimg.FORMAT.RAW
                       if base_index == 0
                       else qemuimg.FORMAT.QCOW2)

        # Verify that the chain data was merged.  Each volume i was
        # written with its own pattern at its own 1k offset.
        for i in range(base_index, top_index + 1):
            offset = i * 1024
            pattern = 0xf0 + i
            # We expect to read all data from top...
            qemu_pattern_verify(top_vol.volumePath, qemuimg.FORMAT.QCOW2,
                                offset=offset, len=1024, pattern=pattern)
            # ...and from base, since top was merged into base.
            qemu_pattern_verify(base_vol.volumePath, base_format,
                                offset=offset, len=1024, pattern=pattern)

        self.assertEqual(sorted(self.expected_locks(base_vol)),
                         sorted(guarded.context.locks))
        # The merge step itself leaves the base ILLEGAL; a later step
        # of the flow is expected to restore legality.
        self.assertEqual(base_vol.getLegality(), sc.ILLEGAL_VOL)
        # A successful merge bumps the base generation.
        self.assertEqual(base_vol.getMetaParam(sc.GENERATION), 1)
def run_job(self, job):
    """Check the job starts PENDING, then run it and wait for completion."""
    # A freshly created job must not have started running yet.
    self.assertEqual(jobs.STATUS.PENDING, job.status)
    job.run()
    wait_for_job(job)