def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    clear_stats()
    lv = lvm.getLV(vg_name, lv_any)
    check_stats(hits=0, misses=1)

    # Calling getLV() again will have a cache hit.
    lvm.getLV(vg_name, lv_any)
    check_stats(hits=1, misses=1)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == GiB
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if lv exists must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        clear_stats()
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
        check_stats(hits=0, misses=1)

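# The size constants and the read_only argument used throughout these tests
# are not defined in this excerpt. Below is a minimal sketch of plausible
# definitions (assumptions, not the suite's actual conftest): GiB/MiB as
# plain byte counts, and read_only as a parametrized pytest fixture so that
# each test taking it runs once in read-write and once in read-only mode.
GiB = 1024**3
MiB = 1024**2


@pytest.fixture(params=[False, True], ids=["rw", "ro"])
def read_only(request):
    return request.param
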
def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

def test_reload_lvs_with_stale_lv(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv1 = "lv1"
    lv2 = "lv2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the LVs.
    lvm.createLV(vg_name, lv1, 1024)
    lvm.createLV(vg_name, lv2, 1024)

    # Make sure that LVs are in the cache.
    expected_lv1 = lvm.getLV(vg_name, lv1)
    expected_lv2 = lvm.getLV(vg_name, lv2)

    # Simulate LV removed on the SPM while this host keeps it in the cache.
    commands.run([
        "lvremove",
        "-f",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, lv2)
    ])

    # Test removing stale LVs in LVMCache._reloadlvs(), which can be invoked
    # e.g. by calling lvm.getLV(vg_name).
    lvs = lvm.getLV(vg_name)

    # And verify that the first LV is still correctly reported.
    assert expected_lv1 in lvs
    assert expected_lv2 not in lvs

def test_spm_lifecycle(tmp_storage, tmp_repo, fake_access, fake_rescan,
                       tmp_db, fake_task, fake_sanlock):
    msd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(msd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(msd_uuid)
    master_dom = blockSD.BlockStorageDomain.create(
        sdUUID=msd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=5,
        storageType=sd.ISCSI_DOMAIN)
    sdCache.knownSDs[msd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(master_dom)

    pool = sp.StoragePool(
        tmp_repo.pool_id, FakeDomainMonitor(), FakeTaskManager())
    pool.setBackend(StoragePoolDiskBackend(pool))
    pool.create(
        poolName="pool",
        msdUUID=msd_uuid,
        domList=[msd_uuid],
        masterVersion=0,
        leaseParams=sd.DEFAULT_LEASE_PARAMS)

    pool.startSpm(prevID=0, prevLVER=0, maxHostID=clusterlock.MAX_HOST_ID)
    pool.stopSpm()

def test_retry_with_wider_filter(tmp_storage, read_only):
    lvm.set_read_only(read_only)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    lvm.getAllPVs()

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * GiB)

    # Creating VG requires read-write mode.
    lvm.set_read_only(False)

    # We run vgcreate with explicit devices argument, so the filter is correct
    # and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # Checking VG must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs --select 'vg_name = vg-name'" will return no
    # data because of the stale filter, so we invalidate the filter and run it
    # again.
    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev, )

def stale_lv(tmp_storage):
    dev_size = 1 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    good_lv_name = "good"
    stale_lv_name = "stale"

    lvm.set_read_only(False)

    # Create VG with 2 lvs.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    for lv_name in (good_lv_name, stale_lv_name):
        lvm.createLV(vg_name, lv_name, 128, activate=False)

    # Reload the cache.
    good_lv = lvm.getLV(vg_name, good_lv_name)
    stale_lv = lvm.getLV(vg_name, stale_lv_name)

    # Simulate removal of the second LV on another host, leaving stale LV in
    # the cache.
    commands.run([
        "lvremove",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, stale_lv_name),
    ])

    # The cache still keeps both lvs.
    assert lvm._lvminfo._lvs == {
        (vg_name, good_lv_name): good_lv,
        (vg_name, stale_lv_name): stale_lv,
    }

    return vg_name, good_lv_name, stale_lv_name

def test_dump_sd_metadata(monkeypatch, tmp_storage, tmp_repo, fake_sanlock,
                          fake_task, domain_version):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)
    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="test",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)
    md_dev = os.path.basename(dev)

    expected_metadata = {
        'uuid': sd_uuid,
        'type': 'ISCSI',
        'class': 'Data',
        'name': 'test',
        'role': sd.REGULAR_DOMAIN,
        'pool': [tmp_repo.pool_id],
        'version': str(domain_version),
        'block_size': sc.BLOCK_SIZE_512,
        'alignment': sc.ALIGNMENT_1M,
        'vguuid': vg.uuid,
        'state': 'OK',
        'metadataDevice': md_dev,
        'vgMetadataDevice': md_dev
    }

    assert dom.dump() == {"metadata": expected_metadata}

def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

def stale_pv(tmp_storage):
    dev_size = 1 * 1024**3
    good_pv_name = tmp_storage.create_device(dev_size)
    stale_pv_name = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # Create VG with 2 PVs.
    lvm.createVG(vg_name, [good_pv_name, stale_pv_name], "initial-tag", 128)

    # Reload the cache.
    pvs = sorted(pv.name for pv in lvm.getAllPVs())
    assert pvs == sorted([good_pv_name, stale_pv_name])

    # Simulate removal of the second PV on another host, leaving stale PV in
    # the cache.
    commands.run([
        "vgreduce",
        "--config", tmp_storage.lvm_config(),
        vg_name,
        stale_pv_name,
    ])
    commands.run([
        "pvremove",
        "--config", tmp_storage.lvm_config(),
        stale_pv_name,
    ])

    # We still report both devices.
    pvs = sorted(pv.name for pv in lvm.getAllPVs())
    assert pvs == sorted([good_pv_name, stale_pv_name])

    return vg_name, good_pv_name, stale_pv_name

def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

def stale_vg(tmp_storage):
    dev_size = 1 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    good_vg_name = str(uuid.uuid4())
    stale_vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # Create 2 VGs.
    lvm.createVG(good_vg_name, [dev1], "initial-tag", 128)
    lvm.createVG(stale_vg_name, [dev2], "initial-tag", 128)

    # Reload the cache.
    vgs = sorted(vg.name for vg in lvm.getAllVGs())
    assert vgs == sorted([good_vg_name, stale_vg_name])

    # Simulate removal of the second VG on another host, leaving stale VG in
    # the cache.
    commands.run([
        "vgremove",
        "--config", tmp_storage.lvm_config(),
        stale_vg_name,
    ])

    # We still report both vgs.
    vgs = sorted(vg.name for vg in lvm.getAllVGs())
    assert vgs == sorted([good_vg_name, stale_vg_name])

    return good_vg_name, stale_vg_name

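# The stale_lv/stale_pv/stale_vg helpers above read like pytest fixtures
# (presumably decorated with @pytest.fixture in the full module). A minimal,
# hypothetical consumer is sketched below; it only re-checks what the
# stale_vg helper itself already asserted, namely that the removed VG is
# still reported from the stale lvm cache. The test name and fixture wiring
# are assumptions.
def test_stale_vg_reported_from_cache(stale_vg):
    good_vg_name, stale_vg_name = stale_vg
    cached_vgs = sorted(vg.name for vg in lvm.getAllVGs())
    assert good_vg_name in cached_vgs
    assert stale_vg_name in cached_vgs
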
def test_get_lvs_after_sd_refresh(tmp_storage):
    dev_size = 1 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    # Create two VGs with one LV in each.
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Make sure that LVs are in LVM cache for both VGs.
    lv1 = lvm.getLV(vg1_name)[0]
    lv2 = lvm.getLV(vg2_name)[0]

    # Simulate refresh SD.
    lvm.invalidateCache()

    # Reload lvs for vg1.
    assert lvm.getLV(vg1_name) == [lv1]

    # Reload lvs for vg2.
    assert lvm.getLV(vg2_name) == [lv2]

def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""

def test_extended_snapshot(tmp_storage, tmp_repo, fake_access, fake_rescan,
                           tmp_db, fake_task, fake_sanlock, domain_version):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)
    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)
    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        imgUUID=img_uuid,
        size=constants.GIB // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)
    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    dom.createVolume(
        imgUUID=img_uuid,
        size=2 * constants.GIB // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)
    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata.
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == constants.GIB

    actual = vol.getInfo()
    assert int(actual["capacity"]) == 2 * constants.GIB

def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fail now after removing the
    # vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""

def test_attach_domain_unsupported_version(
        monkeypatch, tmp_storage, tmp_repo, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)
    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=3,
        storageType=sd.ISCSI_DOMAIN)
    sdCache.knownSDs[sd_uuid] = blockSD.findDomain

    # Remove domain metadata.
    dom.setMetadata({})

    # Set domain metadata to version 0.
    metadata = """\
ALIGNMENT=1048576
BLOCK_SIZE=512
CLASS=Data
DESCRIPTION=storage domain
IOOPTIMEOUTSEC=10
LEASERETRIES=3
LEASETIMESEC=60
LOCKPOLICY=
LOCKRENEWALINTERVALSEC=5
POOL_UUID=
REMOTE_PATH=server:/path
ROLE=Regular
SDUUID={}
TYPE=LOCALFS
VERSION=0
""".format(sd_uuid)
    with open("/dev/{}/metadata".format(vg.name), "wb") as f:
        f.write(metadata.encode("utf-8"))

    spm = fake_spm(
        tmp_repo.pool_id,
        0,
        {sd_uuid: sd.DOM_UNATTACHED_STATUS})

    # Since we removed support for V0 we can no longer read the replaced
    # metadata from storage, and we end up with a missing version key when
    # trying to get the version of the attached domain.
    with pytest.raises(se.MetaDataKeyNotFoundError):
        spm.attachSD(sd_uuid)

def test_failed_to_add_bitmaps_to_v3_domain(tmp_storage, tmp_repo,
                                            fake_access, fake_rescan, tmp_db,
                                            fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)
    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=3,
        storageType=sd.ISCSI_DOMAIN)
    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    base_vol_uuid = str(uuid.uuid4())
    base_vol_capacity = GiB
    top_vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * base_vol_capacity

    # Create base volume.
    dom.createVolume(
        imgUUID=img_uuid,
        capacity=base_vol_capacity,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=base_vol_uuid,
        desc="Test base volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID)

    base_vol = dom.produceVolume(img_uuid, base_vol_uuid)

    with pytest.raises(se.UnsupportedOperation):
        # Create top volume with bitmaps.
        dom.createVolume(
            imgUUID=img_uuid,
            capacity=vol_capacity,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=top_vol_uuid,
            desc="Test top volume",
            srcImgUUID=base_vol.imgUUID,
            srcVolUUID=base_vol.volUUID,
            add_bitmaps=True)

def test_vg_invalidate(tmp_storage):
    dev_size = 1 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Reload cache.
    pv1 = lvm.getPV(dev1)
    vg1 = lvm.getVG(vg1_name)
    lv1 = lvm.getLV(vg1_name)[0]
    pv2 = lvm.getPV(dev2)
    vg2 = lvm.getVG(vg2_name)
    lv2 = lvm.getLV(vg2_name)[0]

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # Invalidate the VG without invalidating its LVs.
    lvm.invalidateVG(vg1_name, invalidateLVs=False)

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {
        vg1_name: lvm.Stale(vg1_name),
        vg2_name: vg2,
    }
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # getVGs() always reloads the cache.
    clear_stats()
    lvm.getVGs([vg1_name, vg2_name])
    check_stats(hits=0, misses=1)

    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}

def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev, )
    assert vg.tags == ("initial-tag", )
    assert int(vg.extent_size) == 128 * MiB

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)
    pv = lvm.getPV(dev)
    lvm.set_read_only(read_only)

    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""

def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    lv = lvm.getLV(vg_name, lv_any)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == 1024**3
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if lv exists must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)

def test_vg_check(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    lvm.set_read_only(read_only)

    assert lvm.chkVG(vg_name)

def test_vg_check(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    lvm.set_read_only(read_only)

    assert lvm.chkVG(vg_name)

def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * 1024**3

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

        # Unused lvs should not be active.
        for vg_name in vgs:
            lv = lvm.getLV(vg_name, "unused")
            assert not lv.active

def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * GiB

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

        # Unused lvs should not be active.
        for vg_name in vgs:
            lv = lvm.getLV(vg_name, "unused")
            assert not lv.active

def test_lv_rename(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name

def test_lv_rename(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name

def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.changeLVTags(
        vg_name,
        lv_name,
        delTags=("initial-tag", ),
        addTags=("new-tag-1", "new-tag-2"))

    lv = lvm.getLV(vg_name, lv_name)
    assert sorted(lv.tags) == ["new-tag-1", "new-tag-2"]

def test_vg_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.changeVGTags(
        vg_name,
        delTags=("initial-tag", ),
        addTags=("new-tag-1", "new-tag-2"))

    lvm.changeVGTags(
        vg_name,
        delTags=["initial-tag"],
        addTags=["new-tag-1", "new-tag-2"])

    vg = lvm.getVG(vg_name)
    assert sorted(vg.tags) == ["new-tag-1", "new-tag-2"]

def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)

    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev1, )

    lvm.extendVG(vg_name, [dev2, dev3], force=False)

    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.changeLVTags(
        vg_name,
        lv_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lv = lvm.getLV(vg_name, lv_name)
    assert sorted(lv.tags) == ["new-tag-1", "new-tag-2"]

def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev,)
    assert vg.tags == ("initial-tag",)
    assert int(vg.extent_size) == 128 * 1024**2

    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fail now after removing the
    # vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""

def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev, )
    assert vg.tags == ("initial-tag", )
    assert int(vg.extent_size) == 128 * 1024**2

    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fail now after removing the
    # vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""

def test_vg_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.changeVGTags(
        vg_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lvm.changeVGTags(
        vg_name,
        delTags=["initial-tag"],
        addTags=["new-tag-1", "new-tag-2"])

    vg = lvm.getVG(vg_name)
    assert sorted(vg.tags) == ["new-tag-1", "new-tag-2"]

def test_retry_with_wider_filter(tmp_storage):
    lvm.set_read_only(False)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    lvm.getAllPVs()

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * 1024**3)

    # We run vgcreate with explicit devices argument, so the filter is correct
    # and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs vg-name" fails because of the stale filter, so
    # we invalidate the filter and run it again.
    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev, )

def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * 1024**3

def test_retry_with_wider_filter(tmp_storage):
    lvm.set_read_only(False)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    lvm.getAllPVs()

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * 1024**3)

    # We run vgcreate with explicit devices argument, so the filter is correct
    # and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs vg-name" fails because of the stale filter, so
    # we invalidate the filter and run it again.
    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev,)

def test_create_instance_block_size_mismatch(tmp_storage, tmp_repo,
                                              fake_sanlock):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(10 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)
    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="test",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=5,
        storageType=sd.ISCSI_DOMAIN)

    # Change metadata to report the wrong block size for current storage.
    dom.setMetaParam(sd.DMDK_BLOCK_SIZE, sc.BLOCK_SIZE_4K)

    # Creating a new instance should fail now.
    with pytest.raises(se.StorageDomainBlockSizeMismatch):
        blockSD.BlockStorageDomain(sd_uuid)

def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * 1024**3

def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * GiB

def test_vg_invalidate_lvs_pvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() first finds the VG using getVG(), so there is a cache hit.
    # No stale PVs for the VG so getPVs() will have another cache hit.
    check_stats(hits=2, misses=0)

    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs and PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._pvs == {dev: lvm.Stale(dev)}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() will not find the invalidated VG in cache, so there is a miss.
    # There are stale PVs for the VG so getPVs() will have another cache miss.
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB

def test_create_snapshot_size(
        tmp_storage, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task,
        fake_sanlock, domain_version):
    # This test was added to verify the fix for
    # https://bugzilla.redhat.com/1700623.
    # As a result of this bug, there can be volumes with corrupted metadata
    # capacity. The metadata of such a volume should be fixed when the volume
    # is prepared. As the creation of tmp storage for block SD is time
    # consuming, let's test this flow also in this test.
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    parent_vol_capacity = constants.GIB
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * parent_vol_capacity

    # Create parent volume.
    dom.createVolume(
        imgUUID=img_uuid,
        size=parent_vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Verify that a snapshot cannot be smaller than its parent.
    with pytest.raises(se.InvalidParameterException):
        dom.createVolume(
            imgUUID=img_uuid,
            size=parent_vol.getSize() - 1,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=vol_uuid,
            desc="Extended volume",
            srcImgUUID=parent_vol.imgUUID,
            srcVolUUID=parent_vol.volUUID,
            initialSize=None)

    # Verify that a snapshot can be bigger than its parent.
    dom.createVolume(
        imgUUID=img_uuid,
        size=vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata.
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == parent_vol_capacity

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

    # Now test the flow in which metadata capacity is corrupted.
    # Corrupt the metadata capacity manually.
    md = vol.getMetadata()
    md.capacity = vol_capacity // 2
    vol.setMetadata(md)

    # During preparation of the volume, metadata capacity should be fixed.
    vol.prepare()

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=4,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        size=10 * 1024**3,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset.
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4

    meta_path = dom.manifest.metadata_volume_path()

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4, CAP=md.capacity)

def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # As creation of block storage domain and volume is quite time consuming,
    # we test several volume operations in one test to speed up the test
    # suite.
    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects that at least one pool is attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            size=vol_size,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # Test create volume.
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset": ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 *
                   sd.LEASE_BLOCKS),
        "owners": [],
        "path": "/dev/{}/leases".format(sd_uuid),
        "version": None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # Test volume prepare.
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()

    assert os.path.exists(vol_path)

    # Verify that we can really write to and read from the image.
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # Test volume teardown.
    vol.teardown(sd_uuid, vol_uuid)

    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # Test also deleting of the volume.
    vol.delete(postZero=False, force=False, discard=False)

    # Verify that the lv backing the volume is deleted.
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

    # Verify also that the metadata in the metadata lv is deleted.
    data = dom.manifest.read_metadata_block(slot)
    assert data == b"\0" * sc.METADATA_SIZE