def stale_lv(tmp_storage):
    dev_size = 1 * 1024**3

    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    good_lv_name = "good"
    stale_lv_name = "stale"

    lvm.set_read_only(False)

    # Create VG with 2 lvs.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    for lv_name in (good_lv_name, stale_lv_name):
        lvm.createLV(vg_name, lv_name, 128, activate=False)

    # Reload the cache.
    good_lv = lvm.getLV(vg_name, good_lv_name)
    stale_lv = lvm.getLV(vg_name, stale_lv_name)

    # Simulate removal of the second LV on another host, leaving stale LV in
    # the cache.
    commands.run([
        "lvremove",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, stale_lv_name),
    ])

    # The cache still keeps both lvs.
    assert lvm._lvminfo._lvs == {
        (vg_name, good_lv_name): good_lv,
        (vg_name, stale_lv_name): stale_lv,
    }

    return vg_name, good_lv_name, stale_lv_name


def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)
    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active


def stale_vg(tmp_storage):
    dev_size = 1 * 1024**3

    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)

    good_vg_name = str(uuid.uuid4())
    stale_vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # Create 2 VGs.
    lvm.createVG(good_vg_name, [dev1], "initial-tag", 128)
    lvm.createVG(stale_vg_name, [dev2], "initial-tag", 128)

    # Reload the cache.
    vgs = sorted(vg.name for vg in lvm.getAllVGs())
    assert vgs == sorted([good_vg_name, stale_vg_name])

    # Simulate removal of the second VG on another host, leaving stale VG in
    # the cache.
    commands.run([
        "vgremove",
        "--config", tmp_storage.lvm_config(),
        stale_vg_name,
    ])

    # We still report both vgs.
    vgs = sorted(vg.name for vg in lvm.getAllVGs())
    assert vgs == sorted([good_vg_name, stale_vg_name])

    return good_vg_name, stale_vg_name


def test_reload_lvs_with_stale_lv(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv1 = "lv1"
    lv2 = "lv2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the LVs.
    lvm.createLV(vg_name, lv1, 1024)
    lvm.createLV(vg_name, lv2, 1024)

    # Make sure that LVs are in the cache.
    expected_lv1 = lvm.getLV(vg_name, lv1)
    expected_lv2 = lvm.getLV(vg_name, lv2)

    # Simulate LV removed on the SPM while this host keeps it in the cache.
    commands.run([
        "lvremove",
        "-f",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, lv2)
    ])

    # Test removing stale LVs in LVMCache._reloadlvs(), which can be invoked
    # e.g. by calling lvm.getLV(vg_name).
    lvs = lvm.getLV(vg_name)

    # And verify that the first LV is still correctly reported.
    assert expected_lv1 in lvs
    assert expected_lv2 not in lvs


def stale_pv(tmp_storage):
    dev_size = 1 * 1024**3

    good_pv_name = tmp_storage.create_device(dev_size)
    stale_pv_name = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # Create VG with 2 PVs.
    lvm.createVG(vg_name, [good_pv_name, stale_pv_name], "initial-tag", 128)

    # Reload the cache.
    pvs = sorted(pv.name for pv in lvm.getAllPVs())
    assert pvs == sorted([good_pv_name, stale_pv_name])

    # Simulate removal of the second PV on another host, leaving stale PV in
    # the cache.
    commands.run([
        "vgreduce",
        "--config", tmp_storage.lvm_config(),
        vg_name,
        stale_pv_name,
    ])
    commands.run([
        "pvremove",
        "--config", tmp_storage.lvm_config(),
        stale_pv_name,
    ])

    # We still report both devices.
    pvs = sorted(pv.name for pv in lvm.getAllPVs())
    assert pvs == sorted([good_pv_name, stale_pv_name])

    return vg_name, good_pv_name, stale_pv_name


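# The stale_pv/stale_vg/stale_lv helpers above return the names they create,
# but no test in this section consumes them yet. A hypothetical example of how
# a test could use stale_lv, relying only on the reload behavior demonstrated
# in test_reload_lvs_with_stale_lv above; the test name and assertions here
# are illustrative, not part of the original suite:

def test_reload_drops_stale_lv_example(tmp_storage):
    vg_name, good_lv_name, stale_lv_name = stale_lv(tmp_storage)

    # Reloading the LVs for the VG hits storage and drops the LV that was
    # removed behind our back.
    lv_names = [lv.name for lv in lvm.getLV(vg_name)]

    assert good_lv_name in lv_names
    assert stale_lv_name not in lv_names

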
def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""


def test_vg_invalidate(tmp_storage):
    dev_size = 1 * GiB

    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)

    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Reload cache.
    pv1 = lvm.getPV(dev1)
    vg1 = lvm.getVG(vg1_name)
    lv1 = lvm.getLV(vg1_name)[0]

    pv2 = lvm.getPV(dev2)
    vg2 = lvm.getVG(vg2_name)
    lv2 = lvm.getLV(vg2_name)[0]

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # Invalidate the VG without invalidating its LVs.
    lvm.invalidateVG(vg1_name, invalidateLVs=False)

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {
        vg1_name: lvm.Stale(vg1_name),
        vg2_name: vg2,
    }
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # getVGs() always reloads the cache.
    clear_stats()
    lvm.getVGs([vg1_name, vg2_name])
    check_stats(hits=0, misses=1)

    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}


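# clear_stats() and check_stats() are small helpers defined elsewhere in this
# module. A minimal sketch of what they might look like, assuming the LVM
# cache keeps hit/miss counters in an object exposing clear() and info();
# the attribute and method names here are assumptions, not the actual vdsm
# API:

def clear_stats():
    # Hypothetical: reset the cache hit/miss counters before a measurement.
    lvm._lvminfo.stats.clear()


def check_stats(hits, misses):
    # Hypothetical: compare the counters against the expected values.
    info = lvm._lvminfo.stats.info()
    assert info["hits"] == hits
    assert info["misses"] == misses

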
def test_vg_check(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    lvm.set_read_only(read_only)

    assert lvm.chkVG(vg_name)


def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * GiB

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

    # Unused lvs should not be active.
    for vg_name in vgs:
        lv = lvm.getLV(vg_name, "unused")
        assert not lv.active


def test_lv_rename(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name


def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.changeLVTags(
        vg_name,
        lv_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lv = lvm.getLV(vg_name, lv_name)
    assert sorted(lv.tags) == ["new-tag-1", "new-tag-2"]


def test_vg_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.changeVGTags(
        vg_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    # changeVGTags() accepts tags as either tuples or lists.
    lvm.changeVGTags(
        vg_name,
        delTags=["initial-tag"],
        addTags=["new-tag-1", "new-tag-2"])

    vg = lvm.getVG(vg_name)
    assert sorted(vg.tags) == ["new-tag-1", "new-tag-2"]


def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * GiB


def test_vg_invalidate_lvs_pvs(tmp_storage):
    dev_size = 1 * GiB

    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() first finds the VG using getVG(), so there is a cache hit.
    # No stale PVs for the VG so getPVs() will have another cache hit.
    check_stats(hits=2, misses=0)

    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs and PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._pvs == {dev: lvm.Stale(dev)}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() will not find the invalidated VG in cache, so there is a miss.
    # There are stale PVs for the VG so getPVs() will have another cache miss.
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}


def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB

    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)

    clear_stats()
    lv = lvm.getLV(vg_name)[0]
    check_stats(hits=0, misses=1)

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    clear_stats()
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=1)


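# The Stale(name) objects asserted above mark cache entries that must be
# reloaded from storage before being trusted. A minimal illustration of the
# idea, assuming nothing about vdsm's actual implementation beyond what the
# assertions show (a freshly built marker compares equal to the cached one):

class StaleSketch:
    """Hypothetical stand-in for a stale cache entry, comparing by name."""

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        # Equality by type and name is what allows the dict comparisons
        # above to match a new marker against the one stored in the cache.
        return isinstance(other, StaleSketch) and other.name == self.name

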
def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending LV to the same size does nothing.
    lvm.extendLV(vg_name, lv_name, 2048)
    lvm.invalidateVG(vg_name)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending LV to a smaller size does nothing.
    lvm.extendLV(vg_name, lv_name, 1024)
    lvm.invalidateVG(vg_name)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * GiB


def test_retry_with_wider_filter(tmp_storage, read_only):
    lvm.set_read_only(read_only)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    clear_stats()
    lvm.getAllPVs()
    check_stats(hits=0, misses=1)

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * GiB)

    # Creating VG requires read-write mode.
    lvm.set_read_only(False)

    # We run vgcreate with an explicit devices argument, so the filter is
    # correct and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # Calling getAllPVs() has a cache miss since createVG() invalidates the
    # PVs.
    clear_stats()
    lvm.getAllPVs()
    check_stats(hits=0, misses=1)

    # A second call to getAllPVs() adds a cache hit since the new PV was
    # reloaded.
    lvm.getAllPVs()
    check_stats(hits=1, misses=1)

    # Checking VG must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs vg-name" fails because of the stale filter, so
    # we invalidate the filter and run it again.
    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev,)


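# The comments above describe the retry pattern this test exercises: run an
# LVM command with the cached (narrow) device filter, and if it fails because
# the filter does not include a newly added device, retry with a wider filter.
# A generic, self-contained sketch of the pattern using a plain subprocess
# call; the helper and filter strings below are illustrative, not the vdsm
# implementation:

import subprocess


def run_vgs(vg_name, device_filter):
    # Run "vgs" with an explicit devices filter, similar to how these tests
    # pass "--config" with tmp_storage.lvm_config().
    cmd = [
        "vgs",
        "--config", "devices {{ filter = {} }}".format(device_filter),
        vg_name,
    ]
    return subprocess.run(cmd, check=True, capture_output=True)


def get_vg_with_retry(vg_name, cached_filter, wide_filter):
    try:
        # First attempt uses the cached filter, which may not include
        # devices added after the cache was built.
        return run_vgs(vg_name, cached_filter)
    except subprocess.CalledProcessError:
        # The filter may be stale; retry once with the wider filter.
        return run_vgs(vg_name, wide_filter)

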
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # Calling getVG() again will get a cache hit.
    lvm.getVG(vg_name)
    check_stats(hits=1, misses=1)

    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)

    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after extendVG() does not use the cache.
    # This happens because extendVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    clear_stats()
    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    check_stats(hits=0, misses=1)

    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        clear_stats()
        pv = lvm.getPV(dev)
        check_stats(hits=0, misses=1)

        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)

    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after reduceVG() does not use the cache.
    # This happens because reduceVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)

    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)


def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    assert vg.name == vg_name
    assert vg.pv_name == (dev,)
    assert vg.tags == ("initial-tag",)
    assert int(vg.extent_size) == 128 * MiB

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    clear_stats()
    pv = lvm.getPV(dev)
    check_stats(hits=0, misses=1)

    # Call getPV() again to see that we also get a cache hit.
    lvm.getPV(dev)
    check_stats(hits=1, misses=1)

    lvm.set_read_only(read_only)

    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We removed the VG, so getting it fails.
    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    clear_stats()
    pv = lvm.getPV(dev)
    check_stats(hits=0, misses=1)

    assert pv.name == dev
    assert pv.vg_name == ""


def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    clear_stats()
    lv = lvm.getLV(vg_name, lv_any)
    check_stats(hits=0, misses=1)

    # Calling getLV() again will have a cache hit.
    lvm.getLV(vg_name, lv_any)
    check_stats(hits=1, misses=1)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == GiB
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # LV typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)
    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if an lv exists must work in both read-only and read-write.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        clear_stats()
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
        check_stats(hits=0, misses=1)