def tmp_storage(monkeypatch, tmpdir):
    """
    Provide a temporary storage for creating temporary block devices, and
    patch vdsm to use it instead of multipath device.

    Yields the TemporaryStorage instance; on teardown closes it, drops the
    LVM cache again and logs the LVM cache hit ratio for this test.
    """
    storage = tmpstorage.TemporaryStorage(str(tmpdir))

    # Get devices from our temporary storage instead of multipath.
    monkeypatch.setattr(multipath, "getMPDevNamesIter", storage.devices)

    # Use custom /run/vdsm/storage directory, used to keep symlinks to active
    # lvs.
    storage_dir = str(tmpdir.join("storage"))
    os.mkdir(storage_dir)
    monkeypatch.setattr(sc, "P_VDSM_STORAGE", storage_dir)

    with closing(storage):
        # Don't let other tests break us...
        lvm.invalidateCache()
        try:
            yield storage
        finally:
            # and don't break other tests.
            lvm.invalidateCache()
            # Report cache effectiveness for this test run.
            stats = lvm.cache_stats()
            log.info("LVM cache hit ratio: %.2f%% (hits: %d misses: %d)",
                     stats["hit_ratio"], stats["hits"], stats["misses"])
def test_get_lvs_after_sd_refresh(tmp_storage):
    """
    LVs of both VGs must be reported correctly after the cache is dropped,
    as happens during a storage domain refresh.
    """
    device_size = 1 * GiB
    first_dev = tmp_storage.create_device(device_size)
    second_dev = tmp_storage.create_device(device_size)

    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    # Build one VG on each device, with a single inactive LV per VG.
    lvm.createVG(vg1_name, [first_dev], "initial-tag", 128)
    lvm.createVG(vg2_name, [second_dev], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Prime the LVM cache with the LVs of both VGs.
    cached_lv1 = lvm.getLV(vg1_name)[0]
    cached_lv2 = lvm.getLV(vg2_name)[0]

    # Simulate refresh SD.
    lvm.invalidateCache()

    # Reloading each VG must yield the same LVs seen before the refresh.
    assert lvm.getLV(vg1_name) == [cached_lv1]
    assert lvm.getLV(vg2_name) == [cached_lv2]
def test_lv_stale_reload_all_clear(stale_lv):
    """After dropping the whole cache, a stale lv is no longer reported."""
    vg_name, good_lv_name, stale_lv_name = stale_lv

    lvm.invalidateCache()  # drop all cached state

    # Reloading the VG must report only the lv that still exists.
    reported = lvm.getLV(vg_name)
    assert [lv.name for lv in reported] == [good_lv_name]
def test_vg_stale_reload_all_clear(stale_vg):
    """After dropping the whole cache, a stale vg is no longer reported."""
    good_vg_name, stale_vg_name = stale_vg

    lvm.invalidateCache()  # drop all cached state

    # Reloading must report only the vg that still exists.
    reported = lvm.getAllVGs()
    assert [vg.name for vg in reported] == [good_vg_name]
def test_pv_stale_reload_all_clear(stale_pv):
    """After dropping the whole cache, a stale pv is no longer reported."""
    vg_name, good_pv_name, stale_pv_name = stale_pv

    lvm.invalidateCache()  # drop all cached state

    # Reloading must report only the pv that still exists.
    reported = lvm.getAllPVs()
    assert [pv.name for pv in reported] == [good_pv_name]
def test_vg_stale_reload_one_clear(stale_vg):
    """After a full cache drop, only the existing vg can be looked up."""
    good_vg_name, stale_vg_name = stale_vg

    lvm.invalidateCache()  # drop all cached state

    # The good vg lookup succeeds and returns the right name.
    assert lvm.getVG(good_vg_name).name == good_vg_name

    # The stale vg is gone from the rebuilt cache.
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(stale_vg_name)
def test_lv_stale_reload_one_clear(stale_lv):
    """After a full cache drop, only the existing lv can be looked up."""
    vg_name, good_lv_name, stale_lv_name = stale_lv

    lvm.invalidateCache()  # drop all cached state

    # The good lv lookup succeeds and returns the right name.
    assert lvm.getLV(vg_name, good_lv_name).name == good_lv_name

    # The stale lv is gone from the rebuilt cache.
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(vg_name, stale_lv_name)
def test_pv_stale_reload_one_clear(stale_pv):
    """After a full cache drop, only the existing pv can be looked up."""
    vg_name, good_pv_name, stale_pv_name = stale_pv

    lvm.invalidateCache()  # drop all cached state

    # The good pv lookup succeeds and returns the right name.
    assert lvm.getPV(good_pv_name).name == good_pv_name

    # The stale pv is gone from the rebuilt cache.
    with pytest.raises(se.InaccessiblePhysDev):
        lvm.getPV(stale_pv_name)
def refreshStorage(self, resize=True):
    """
    Rescan multipath devices and drop the LVM cache, then mark the storage
    state as updated unless a new invalidation was requested meanwhile.

    Arguments:
        resize (bool): if True, also resize multipath devices after the
            rescan.
    """
    # Mark the refresh as in progress *before* rescanning, so a concurrent
    # invalidation during the rescan can be detected below.
    self.__staleStatus = self.STORAGE_REFRESHING

    multipath.rescan()
    if resize:
        multipath.resize_devices()
    lvm.invalidateCache()

    # If a new invalidateStorage request came in after the refresh
    # started then we cannot flag the storages as updated (force a
    # new rescan later).
    with self._syncroot:
        if self.__staleStatus == self.STORAGE_REFRESHING:
            self.__staleStatus = self.STORAGE_UPDATED
def test_vg_stale_reload_all_clear(stale_vg):
    """
    After a full cache drop, only the existing vg is reported, and cache
    hit/miss counters behave as expected.
    """
    good_vg_name, stale_vg_name = stale_vg

    # Drop all cached state and reset the counters.
    lvm.invalidateCache()
    clear_stats()

    # First reload misses the cache and reports only the good vg.
    reported = lvm.getAllVGs()
    assert [vg.name for vg in reported] == [good_vg_name]
    check_stats(hits=0, misses=1)

    # A repeated call is served from the cache, adding one hit.
    lvm.getAllVGs()
    check_stats(hits=1, misses=1)
def refreshStorage(self, resize=True):
    """
    Rescan multipath devices and drop the LVM cache, then mark the storage
    state as updated unless a new invalidation was requested meanwhile.

    The whole refresh is timed and logged via utils.stopwatch.

    Arguments:
        resize (bool): if True, also resize multipath devices after the
            rescan.
    """
    self.log.info("Refreshing storage domain cache (resize=%s)", resize)
    with utils.stopwatch(
            "Refreshing storage domain cache",
            level=logging.INFO,
            log=self.log):
        # Mark the refresh as in progress *before* rescanning, so a
        # concurrent invalidation during the rescan can be detected below.
        self.__staleStatus = self.STORAGE_REFRESHING

        multipath.rescan()
        if resize:
            multipath.resize_devices()
        lvm.invalidateCache()

        # If a new invalidateStorage request came in after the refresh
        # started then we cannot flag the storages as updated (force a
        # new rescan later).
        with self._syncroot:
            if self.__staleStatus == self.STORAGE_REFRESHING:
                self.__staleStatus = self.STORAGE_UPDATED
def test_reload_lvs_with_stale_lv(tmp_storage):
    """
    An lv removed behind our back (e.g. on the SPM) stays in the cache
    until the cache is invalidated, and disappears afterwards.
    """
    device_size = 10 * GiB
    first_dev = tmp_storage.create_device(device_size)
    second_dev = tmp_storage.create_device(device_size)
    vg_name = str(uuid.uuid4())
    lv1_name = "lv1"
    lv2_name = "lv2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [first_dev, second_dev], "initial-tag", 128)

    # Create the LVs.
    lvm.createLV(vg_name, lv1_name, 1024)
    lvm.createLV(vg_name, lv2_name, 1024)

    # Prime the cache with both LVs.
    expected_lv1 = lvm.getLV(vg_name, lv1_name)
    expected_lv2 = lvm.getLV(vg_name, lv2_name)

    # Remove the second LV behind vdsm's back, as the SPM would, while this
    # host still has it cached.
    commands.run([
        "lvremove",
        "-f",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, lv2_name)
    ])

    # Exercise the stale-lv handling in LVMCache._reloadlvs(), triggered by
    # listing the VG's lvs.
    reported = lvm.getLV(vg_name)

    # The surviving LV is still reported correctly...
    assert expected_lv1 in reported
    # ...and the removed LV lingers in the cache until invalidation.
    assert expected_lv2 in reported

    # Now drop the cache and list again.
    lvm.invalidateCache()
    reported = lvm.getLV(vg_name)

    # The surviving LV is still reported correctly.
    assert expected_lv1 in reported
    # The removed LV is gone after the cache update.
    assert expected_lv2 not in reported
def tmp_storage(monkeypatch, tmpdir):
    """
    Provide a temporary storage for creating temporary block devices, and
    patch vdsm to use it instead of multipath device.

    Yields the TemporaryStorage instance; on teardown closes it and drops
    the LVM cache again.
    """
    storage = tmpstorage.TemporaryStorage(str(tmpdir))

    # Get devices from our temporary storage instead of multipath.
    monkeypatch.setattr(multipath, "getMPDevNamesIter", storage.devices)

    # Use custom /run/vdsm/storage directory, used to keep symlinks to active
    # lvs.
    storage_dir = str(tmpdir.join("storage"))
    os.mkdir(storage_dir)
    monkeypatch.setattr(sc, "P_VDSM_STORAGE", storage_dir)

    with closing(storage):
        # Don't let other tests break us...
        lvm.invalidateCache()
        try:
            yield storage
        finally:
            # and don't break other tests.
            lvm.invalidateCache()
def refresh(self):
    """
    Drop the LVM cache and clear the cached storage domains, under the
    cache lock so concurrent lookups see a consistent state.
    """
    with self._syncroot:
        lvm.invalidateCache()
        self.__domainCache.clear()
def refresh(self):
    """
    Drop the LVM cache and clear the cached storage domains, under the
    cache lock so concurrent lookups see a consistent state.
    """
    self.log.info("Clearing storage domain cache")
    with self._syncroot:
        lvm.invalidateCache()
        self.__domainCache.clear()