def _create_domain(
        self, name, version, remote_path, block_size=sc.BLOCK_SIZE_512,
        max_hosts=sc.HOSTS_4K_1M):
    sd_uuid = str(uuid.uuid4())

    dom = localFsSD.LocalFsStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=name,
        domClass=sd.DATA_DOMAIN,
        remotePath=remote_path,
        version=version,
        storageType=sd.LOCALFS_DOMAIN,
        block_size=block_size,
        max_hosts=max_hosts)

    sdCache.knownSDs[sd_uuid] = localFsSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # sd.StorageDomainManifest.getRepoPath() assumes at least one pool is
    # attached.
    dom.attach(self.pool_id)

    return dom

def create_localfs_domain(self, name, version):
    """
    Create a local FS file storage domain in the repository.
    """
    remote_path = str(self.tmpdir.mkdir(name))
    self.connect_localfs(remote_path)

    sd_uuid = str(uuid.uuid4())
    dom = localFsSD.LocalFsStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=name,
        domClass=sd.DATA_DOMAIN,
        remotePath=remote_path,
        version=version,
        storageType=sd.LOCALFS_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = localFsSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # sd.StorageDomainManifest.getRepoPath() assumes at least one pool is
    # attached.
    dom.attach(self.pool_id)

    return dom

def test_spm_lifecycle(tmp_storage, tmp_repo, fake_access, fake_rescan,
                       tmp_db, fake_task, fake_sanlock):
    msd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(msd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(msd_uuid)

    master_dom = blockSD.BlockStorageDomain.create(
        sdUUID=msd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=5,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[msd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(master_dom)

    pool = sp.StoragePool(
        tmp_repo.pool_id, FakeDomainMonitor(), FakeTaskManager())
    pool.setBackend(StoragePoolDiskBackend(pool))
    pool.create(
        poolName="pool",
        msdUUID=msd_uuid,
        domList=[msd_uuid],
        masterVersion=0,
        leaseParams=sd.DEFAULT_LEASE_PARAMS)

    pool.startSpm(prevID=0, prevLVER=0, maxHostID=clusterlock.MAX_HOST_ID)
    pool.stopSpm()

def test_convert_from_v3_to_v4_localfs(tmpdir, tmp_repo, fake_access):
    remote_path = str(tmpdir.mkdir("domain"))
    tmp_repo.connect_localfs(remote_path)

    sd_uuid = str(uuid.uuid4())
    dom = localFsSD.LocalFsStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        remotePath=remote_path,
        version=3,
        storageType=sd.LOCALFS_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = localFsSD.findDomain
    sdCache.manuallyAddDomain(dom)

    assert dom.getVersion() == 3

    fc = formatconverter.DefaultFormatConverter()
    fc.convert(
        repoPath=tmp_repo.path,
        hostId=1,
        imageRepo=dom,
        isMsd=False,
        targetFormat='4')

    # LocalFS does not support external leases, so the only change is the
    # version.
    assert dom.getVersion() == 4

def test_extended_snapshot(tmp_storage, tmp_repo, fake_access, fake_rescan,
                           tmp_db, fake_task, fake_sanlock, domain_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    # Create the parent volume.
    dom.createVolume(
        imgUUID=img_uuid,
        size=constants.GIB // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Create a snapshot volume that is bigger than its parent.
    dom.createVolume(
        imgUUID=img_uuid,
        size=2 * constants.GIB // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata.
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == constants.GIB

    actual = vol.getInfo()
    assert int(actual["capacity"]) == 2 * constants.GIB

def test_failed_to_add_bitmaps_to_v3_domain(
        tmp_storage, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task,
        fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=3,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    base_vol_uuid = str(uuid.uuid4())
    base_vol_capacity = GiB
    top_vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * base_vol_capacity

    # Create base volume.
    dom.createVolume(
        imgUUID=img_uuid,
        capacity=base_vol_capacity,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=base_vol_uuid,
        desc="Test base volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID)

    base_vol = dom.produceVolume(img_uuid, base_vol_uuid)

    with pytest.raises(se.UnsupportedOperation):
        # Create top volume with bitmaps.
        dom.createVolume(
            imgUUID=img_uuid,
            capacity=vol_capacity,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=top_vol_uuid,
            desc="Test top volume",
            srcImgUUID=base_vol.imgUUID,
            srcVolUUID=base_vol.volUUID,
            add_bitmaps=True)

def test_create_domain_metadata(tmpdir, tmp_repo, fake_access,
                                domain_version):
    remote_path = str(tmpdir.mkdir("domain"))
    tmp_repo.connect_localfs(remote_path)

    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"

    dom = localFsSD.LocalFsStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        remotePath=remote_path,
        version=domain_version,
        storageType=sd.LOCALFS_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = localFsSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.LOCALFS_DOMAIN,
        sd.DMDK_VERSION: domain_version,
        fileSD.REMOTE_PATH: remote_path,
    }

    # In version 5 we added ALIGNMENT and BLOCK_SIZE.
    if domain_version > 4:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Test also the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

def _create_domain(
        self, name, version, remote_path, block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M):
    sd_uuid = str(uuid.uuid4())

    dom = localFsSD.LocalFsStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=name,
        domClass=sd.DATA_DOMAIN,
        remotePath=remote_path,
        version=version,
        storageType=sd.LOCALFS_DOMAIN,
        block_size=block_size,
        alignment=alignment)

    sdCache.knownSDs[sd_uuid] = localFsSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # sd.StorageDomainManifest.getRepoPath() assumes at least one pool is
    # attached.
    dom.attach(self.pool_id)

    return dom

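# A minimal usage sketch for the helper above (hypothetical, not part of
# the original suite): it assumes the helper is a method of a
# temporary-repo fixture object exposing tmpdir, connect_localfs() and
# pool_id, as the surrounding tests suggest.
#
#     def test_domain_version(tmp_repo):
#         remote_path = str(tmp_repo.tmpdir.mkdir("domain"))
#         tmp_repo.connect_localfs(remote_path)
#         dom = tmp_repo._create_domain("domain", 5, remote_path)
#         assert dom.getVersion() == 5
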
def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock, domain_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        capacity=10 * GiB,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset.
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    if domain_version < 5:
        assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4
    else:
        assert offset == (blockSD.METADATA_BASE_V5 +
                          slot * blockSD.METADATA_SLOT_SIZE_V5)

    meta_path = dom.manifest.metadata_volume_path()

    # Check capacity.
    assert 10 * GiB == vol.getCapacity()
    vol.setCapacity(0)
    with pytest.raises(se.MetaDataValidationError):
        vol.getCapacity()
    vol.setCapacity(10 * GiB)

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(domain_version)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(domain_version, CAP=md.capacity)

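# A minimal sketch of the slot-to-offset mapping verified in the test
# above, using the vdsm constants named there; illustrative only, not the
# actual implementation in blockSD:
#
#     def expected_metadata_offset(slot, domain_version):
#         if domain_version < 5:
#             # v4 stores the volume metadata slots at the start of the
#             # metadata LV.
#             return slot * blockSD.METADATA_SLOT_SIZE_V4
#         # v5 places the slots after a reserved base area.
#         return (blockSD.METADATA_BASE_V5
#                 + slot * blockSD.METADATA_SLOT_SIZE_V5)
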
def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock,
                           domain_version):
    # As creation of a block storage domain and a volume is quite time
    # consuming, we test several volume operations in one test to speed up
    # the test suite.
    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * GiB
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()

    # Attach repo pool - the SD expects that at least one pool is attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            capacity=vol_capacity,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # Test create volume.
    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Get the metadata slot, used for the volume metadata and the volume
    # lease offset.
    _, slot = vol.getMetadataId()

    lease = dom.getVolumeLease(img_uuid, vol_uuid)

    assert lease.name == vol.volUUID
    assert lease.path == "/dev/{}/leases".format(sd_uuid)
    assert lease.offset == (blockSD.RESERVED_LEASES + slot) * dom.alignment

    # Test that we created a sanlock resource for this volume.
    resource = fake_sanlock.read_resource(
        lease.path,
        lease.offset,
        align=dom.alignment,
        sector=dom.block_size)
    assert resource == {
        "acquired": False,
        "align": dom.alignment,
        "lockspace": vol.sdUUID.encode("utf-8"),
        "resource": vol.volUUID.encode("utf-8"),
        "sector": dom.block_size,
        "version": 0,
    }

    # Test volume info.
    actual = vol.getInfo()

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == {
        "offset": lease.offset,
        "owners": [],
        "path": lease.path,
        "version": None,
    }
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == sc.VOL_STATUS_OK
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Test volume prepare.
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    lv_size = int(lvm.getLV(sd_uuid, vol_uuid).size)

    # Check volume size of an unprepared volume - uses lvm.
    size = dom.getVolumeSize(img_uuid, vol_uuid)
    assert size.apparentsize == size.truesize == lv_size

    vol.prepare()
    assert os.path.exists(vol_path)

    # Check volume size of a prepared volume - uses seek.
    size = dom.getVolumeSize(img_uuid, vol_uuid)
    assert size.apparentsize == size.truesize == lv_size

    # Verify that we can really write to and read from the image.
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # Test volume teardown.
    vol.teardown(sd_uuid, vol_uuid)
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # Test also deleting of the volume.
    vol.delete(postZero=False, force=False, discard=False)

    # Verify that the LV backing the volume is deleted.
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * GiB)
    dev2 = tmp_storage.create_device(10 * GiB)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Test also the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * MiB

    # Test the domain lease.
    lease = dom.getClusterLease()
    assert lease.name == "SDM"
    assert lease.path == "/dev/{}/leases".format(dom.sdUUID)
    assert lease.offset == dom.alignment

    resource = fake_sanlock.read_resource(
        lease.path,
        lease.offset,
        align=dom.alignment,
        sector=dom.block_size)
    assert resource == {
        "acquired": False,
        "align": dom.alignment,
        "lockspace": dom.sdUUID.encode("utf-8"),
        "resource": lease.name.encode("utf-8"),
        "sector": dom.block_size,
        "version": 0,
    }

    # Test special volumes sizes.
    for name in (sd.IDS, sd.INBOX, sd.OUTBOX, sd.METADATA):
        lv = lvm.getLV(dom.sdUUID, name)
        # This is the minimal LV size on block storage.
        assert int(lv.size) == 128 * MiB

    lv = lvm.getLV(dom.sdUUID, blockSD.MASTERLV)
    assert int(lv.size) == GiB

    lv = lvm.getLV(dom.sdUUID, sd.LEASES)
    assert int(lv.size) == sd.LEASES_SLOTS * dom.alignment

    if domain_version > 3:
        lv = lvm.getLV(dom.sdUUID, sd.XLEASES)
        assert int(lv.size) == sd.XLEASES_SLOTS * dom.alignment

def test_create_snapshot_size(
        tmp_storage, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task,
        fake_sanlock, domain_version):
    # This test was added to verify the fix for
    # https://bugzilla.redhat.com/1700623. As a result of this bug, there
    # can be volumes with corrupted metadata capacity. The metadata of such
    # a volume should be fixed when the volume is prepared. As the creation
    # of tmp storage for a block SD is time consuming, we test this flow
    # here as well.
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    parent_vol_capacity = constants.GIB
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * parent_vol_capacity

    # Create parent volume.
    dom.createVolume(
        imgUUID=img_uuid,
        size=parent_vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Verify that a snapshot cannot be smaller than its parent.
    with pytest.raises(se.InvalidParameterException):
        dom.createVolume(
            imgUUID=img_uuid,
            size=parent_vol.getSize() - 1,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=vol_uuid,
            desc="Extended volume",
            srcImgUUID=parent_vol.imgUUID,
            srcVolUUID=parent_vol.volUUID,
            initialSize=None)

    # Verify that a snapshot can be bigger than its parent.
    dom.createVolume(
        imgUUID=img_uuid,
        size=vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata.
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == parent_vol_capacity

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

    # Now test the flow in which metadata capacity is corrupted.
    # Corrupt the metadata capacity manually.
    md = vol.getMetadata()
    md.capacity = vol_capacity // 2
    vol.setMetadata(md)

    # During preparation of the volume, the metadata capacity should be
    # fixed.
    vol.prepare()
    actual = vol.getInfo()

    assert int(actual["capacity"]) == vol_capacity

def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=4,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        size=10 * 1024**3,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset.
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4

    meta_path = dom.manifest.metadata_volume_path()

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4, CAP=md.capacity)

def test_convert_to_v5_block(tmpdir, tmp_repo, tmp_storage, tmp_db,
                             fake_rescan, fake_task, fake_sanlock,
                             src_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=src_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # Create domain directory structure.
    dom.refresh()

    # Only attached domains are converted.
    dom.attach(tmp_repo.pool_id)

    # Create some volumes in v4 format.
    for i in range(3):
        dom.createVolume(
            desc="Awesome volume %d" % i,
            diskType="DATA",
            imgUUID=str(uuid.uuid4()),
            preallocate=sc.SPARSE_VOL,
            capacity=10 * GiB,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID,
            volFormat=sc.COW_FORMAT,
            volUUID=str(uuid.uuid4()))

    # Record domain and volumes metadata before conversion.
    old_dom_md = dom.getMetadata()
    volumes_md = {vol.volUUID: vol.getMetadata()
                  for vol in dom.iter_volumes()}

    # Simulate a partly-deleted volume with cleared metadata. Such volumes
    # could be created by vdsm < 4.20.34-1.
    img_id = str(uuid.uuid4())
    vol_id = str(uuid.uuid4())
    dom.createVolume(
        desc="Half deleted volume",
        diskType="DATA",
        imgUUID=img_id,
        preallocate=sc.SPARSE_VOL,
        capacity=10 * GiB,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_id)
    partly_deleted_vol = dom.produceVolume(img_id, vol_id)
    slot = partly_deleted_vol.getMetadataId()[1]
    dom.manifest.write_metadata_block(slot, CLEARED_VOLUME_METADATA)

    # Simulate a volume with invalid metadata to make sure such a volume
    # does not break conversion.
    img_id = str(uuid.uuid4())
    vol_id = str(uuid.uuid4())
    dom.createVolume(
        desc="Volume with invalid metadata",
        diskType="DATA",
        imgUUID=img_id,
        preallocate=sc.SPARSE_VOL,
        capacity=10 * GiB,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_id)
    invalid_md_vol = dom.produceVolume(img_id, vol_id)
    slot = invalid_md_vol.getMetadataId()[1]
    dom.manifest.write_metadata_block(slot, INVALID_VOLUME_METADATA)

    # These volumes will not be converted to V5 format.
    skip_volumes = {partly_deleted_vol.volUUID, invalid_md_vol.volUUID}

    # Convert the domain.
    fc = formatconverter.DefaultFormatConverter()
    fc.convert(
        repoPath=tmp_repo.path,
        hostId=1,
        imageRepo=dom,
        isMsd=False,
        targetFormat='5')

    # Verify changes in domain metadata.
    new_dom_md = dom.getMetadata()

    # Keys modified in v5.
    assert old_dom_md.pop("VERSION") == src_version
    assert new_dom_md.pop("VERSION") == 5

    # Keys added in v5.
    assert new_dom_md.pop("BLOCK_SIZE") == sc.BLOCK_SIZE_512
    assert new_dom_md.pop("ALIGNMENT") == sc.ALIGNMENT_1M

    # Keys removed in v5.
    assert old_dom_md.pop("LOGBLKSIZE") == sc.BLOCK_SIZE_512
    assert old_dom_md.pop("PHYBLKSIZE") == sc.BLOCK_SIZE_512

    # The rest of the keys must not be modified by conversion.
    assert old_dom_md == new_dom_md

    # Verify that the xleases volume is created when upgrading from
    # version < 4.
    xleases_vol = lvm.getLV(sd_uuid, sd.XLEASES)
    assert int(xleases_vol.size) == sd.XLEASES_SLOTS * dom.alignment
    with pytest.raises(se.NoSuchLease):
        dom.manifest.lease_info("no-such-lease")

    # Verify that volume metadata was converted to the v5 format.
    for vol in dom.iter_volumes():
        if vol.volUUID in skip_volumes:
            continue
        vol_md = volumes_md[vol.volUUID]
        _, slot = vol.getMetadataId()
        data = dom.manifest.read_metadata_block(slot)
        data = data.rstrip(b"\0")
        assert data == vol_md.storage_format(5)

    # Verify that cleared and invalid metadata was copied as-is to the v5
    # area.
    slot = partly_deleted_vol.getMetadataId()[1]
    assert dom.manifest.read_metadata_block(slot) == CLEARED_VOLUME_METADATA

    slot = invalid_md_vol.getMetadataId()[1]
    assert dom.manifest.read_metadata_block(slot) == INVALID_VOLUME_METADATA

    # Check that the v4 metadata area is zeroed.
    meta_path = dom.manifest.metadata_volume_path()
    offset = blockSD.METADATA_BASE_V4
    size = blockSD.METADATA_BASE_V5 - blockSD.METADATA_BASE_V4
    data = misc.readblock(meta_path, offset, size)
    assert data == b"\0" * size

def test_create_with_bitmaps(tmp_storage, tmp_repo, fake_access, fake_rescan,
                             tmp_db, fake_task, fake_sanlock,
                             domain_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    base_vol_uuid = str(uuid.uuid4())
    base_vol_capacity = GiB
    top_vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * base_vol_capacity
    bitmap_names = ['bitmap1', 'bitmap2']

    # Create base volume.
    dom.createVolume(
        imgUUID=img_uuid,
        capacity=base_vol_capacity,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=base_vol_uuid,
        desc="Test base volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID)

    base_vol = dom.produceVolume(img_uuid, base_vol_uuid)
    base_vol_path = base_vol.getVolumePath()

    # Prepare the volume in order to create bitmaps.
    base_vol.prepare()

    # Add new bitmaps to the base volume.
    for bitmap_name in bitmap_names:
        op = qemuimg.bitmap_add(
            base_vol_path,
            bitmap_name,
        )
        op.run()

    # Teardown the volume; test that prepare() is called during the
    # snapshot creation.
    base_vol.teardown(sd_uuid, base_vol_uuid)

    # Create top volume with bitmaps.
    dom.createVolume(
        imgUUID=img_uuid,
        capacity=vol_capacity,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=top_vol_uuid,
        desc="Test top volume",
        srcImgUUID=base_vol.imgUUID,
        srcVolUUID=base_vol.volUUID,
        add_bitmaps=True)

    top_vol = dom.produceVolume(img_uuid, top_vol_uuid)
    top_vol_path = top_vol.getVolumePath()

    # Prepare the volume in order to get info on the bitmaps.
    top_vol.prepare()

    info = qemuimg.info(top_vol_path)

    # Teardown the top volume.
    top_vol.teardown(sd_uuid, top_vol_uuid)

    assert info['format-specific']['data']['bitmaps'] == [
        {
            "flags": ["auto"],
            "name": bitmap_names[0],
            "granularity": 65536
        },
        {
            "flags": ["auto"],
            "name": bitmap_names[1],
            "granularity": 65536
        },
    ]

def test_convert_from_v4_to_v5_block(tmpdir, tmp_repo, tmp_storage, tmp_db,
                                     fake_rescan, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=4,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # Create domain directory structure.
    dom.refresh()

    # Only attached domains are converted.
    dom.attach(tmp_repo.pool_id)

    # Create some volumes in v4 format.
    for i in range(3):
        dom.createVolume(
            desc="Awesome volume %d" % i,
            diskType="DATA",
            imgUUID=str(uuid.uuid4()),
            preallocate=sc.SPARSE_VOL,
            size=10 * 1024**3,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID,
            volFormat=sc.COW_FORMAT,
            volUUID=str(uuid.uuid4()))

    # Record domain and volumes metadata before conversion.
    old_dom_md = dom.getMetadata()
    volumes_md = {vol.volUUID: vol.getMetadata()
                  for vol in dom.iter_volumes()}

    fc = formatconverter.DefaultFormatConverter()
    fc.convert(
        repoPath=tmp_repo.path,
        hostId=1,
        imageRepo=dom,
        isMsd=False,
        targetFormat='5')

    # Verify changes in domain metadata.
    new_dom_md = dom.getMetadata()

    # Keys modified in v5.
    assert old_dom_md.pop("VERSION") == 4
    assert new_dom_md.pop("VERSION") == 5

    # Keys added in v5.
    assert new_dom_md.pop("BLOCK_SIZE") == sc.BLOCK_SIZE_512
    assert new_dom_md.pop("ALIGNMENT") == sc.ALIGNMENT_1M

    # Keys removed in v5.
    assert old_dom_md.pop("LOGBLKSIZE") == sc.BLOCK_SIZE_512
    assert old_dom_md.pop("PHYBLKSIZE") == sc.BLOCK_SIZE_512

    # The rest of the keys must not be modified by conversion.
    assert old_dom_md == new_dom_md

    # Verify that volume metadata was converted to the v5 format.
    for vol in dom.iter_volumes():
        vol_md = volumes_md[vol.volUUID]
        _, slot = vol.getMetadataId()
        data = dom.manifest.read_metadata_block(slot)
        data = data.rstrip("\0")
        assert data == vol_md.storage_format(5)

    # Check that the v4 metadata area is zeroed.
    meta_path = dom.manifest.metadata_volume_path()
    offset = blockSD.METADATA_BASE_V4
    size = blockSD.METADATA_BASE_V5 - blockSD.METADATA_BASE_V4
    data = misc.readblock(meta_path, offset, size)
    assert data == "\0" * size

def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Test also the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB

def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # As creation of a block storage domain and a volume is quite time
    # consuming, we test several volume operations in one test to speed up
    # the test suite.
    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()

    # Attach repo pool - the SD expects that at least one pool is attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            size=vol_size,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # Test create volume.
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset": ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 *
                   sd.LEASE_BLOCKS),
        "owners": [],
        "path": "/dev/{}/leases".format(sd_uuid),
        "version": None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # Test volume prepare.
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()
    assert os.path.exists(vol_path)

    # Verify that we can really write to and read from the image.
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # Test volume teardown.
    vol.teardown(sd_uuid, vol_uuid)
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # Test also deleting of the volume.
    vol.delete(postZero=False, force=False, discard=False)

    # Verify that the LV backing the volume is deleted.
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

    # Verify also that the metadata in the metadata LV is deleted.
    data = dom.manifest.read_metadata_block(slot)
    assert data == b"\0" * sc.METADATA_SIZE

def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # As creation of a block storage domain and a volume is quite time
    # consuming, we test several volume operations in one test to speed up
    # the test suite.
    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()

    # Attach repo pool - the SD expects that at least one pool is attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            size=vol_size,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # Test create volume.
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset": ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 *
                   sd.LEASE_BLOCKS),
        "owners": [],
        "path": "/dev/{}/leases".format(sd_uuid),
        "version": None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # Test volume prepare.
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()
    assert os.path.exists(vol_path)

    # Verify that we can really write to and read from the image.
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # Test volume teardown.
    vol.teardown(sd_uuid, vol_uuid)
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # Test also deleting of the volume.
    vol.delete(postZero=False, force=False, discard=False)

    # Verify that the LV backing the volume is deleted.
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=4,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        size=10 * 1024**3,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset.
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4

    meta_path = dom.manifest.metadata_volume_path()

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4, CAP=md.capacity)

def test_create_snapshot_size(tmp_storage, tmp_repo, fake_access,
                              fake_rescan, tmp_db, fake_task, fake_sanlock,
                              domain_version):
    # This test was added to verify the fix for
    # https://bugzilla.redhat.com/1700623. As a result of this bug, there
    # can be volumes with corrupted metadata capacity. The metadata of such
    # a volume should be fixed when the volume is prepared. As the creation
    # of tmp storage for a block SD is time consuming, we test this flow
    # here as well.
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    parent_vol_capacity = constants.GIB
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * parent_vol_capacity

    # Create parent volume.
    dom.createVolume(
        imgUUID=img_uuid,
        size=parent_vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Verify that a snapshot cannot be smaller than its parent.
    with pytest.raises(se.InvalidParameterException):
        dom.createVolume(
            imgUUID=img_uuid,
            size=parent_vol.getSize() - 1,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=vol_uuid,
            desc="Extended volume",
            srcImgUUID=parent_vol.imgUUID,
            srcVolUUID=parent_vol.volUUID,
            initialSize=None)

    # Verify that a snapshot can be bigger than its parent.
    dom.createVolume(
        imgUUID=img_uuid,
        size=vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata.
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == parent_vol_capacity

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

    # Now test the flow in which metadata capacity is corrupted.
    # Corrupt the metadata capacity manually.
    md = vol.getMetadata()
    md.capacity = vol_capacity // 2
    vol.setMetadata(md)

    # During preparation of the volume, the metadata capacity should be
    # fixed.
    vol.prepare()
    actual = vol.getInfo()

    assert int(actual["capacity"]) == vol_capacity

def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB