Example #1
    def validate(self):
        try:
            lvm.getLV(self.sdUUID, self.volUUID)
        except se.LogicalVolumeDoesNotExistError:
            raise se.VolumeDoesNotExist(self.volUUID)

        volume.VolumeManifest.validate(self)
Example #2
def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active
Example #3
def test_lv_activate_deactivate(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active

    # Activate the inactive lv.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert lv.active

    # Deactivate the active lv.
    lvm.deactivateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert not lv.active
Example #4
def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    clear_stats()
    lv = lvm.getLV(vg_name, lv_any)
    check_stats(hits=0, misses=1)

    # Calling getLV() again should hit the cache.
    lvm.getLV(vg_name, lv_any)
    check_stats(hits=1, misses=1)

    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == GiB
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # The LV is typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if the lv exists must work in both read-only and read-write.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        clear_stats()
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
        check_stats(hits=0, misses=1)
Example #5
def test_get_lvs_after_sd_refresh(tmp_storage):
    dev_size = 1 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    # Create two VGs with one LV each.
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    lvm.createLV(vg1_name, "lv1", 128, activate=False)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Make sure that LVs are in LVM cache for both VGs.
    lv1 = lvm.getLV(vg1_name)[0]
    lv2 = lvm.getLV(vg2_name)[0]

    # Simulate an SD refresh.
    lvm.invalidateCache()

    # Reload lvs for vg1.
    assert lvm.getLV(vg1_name) == [lv1]

    # Reload lvs for vg2.
    assert lvm.getLV(vg2_name) == [lv2]
Example #6
@pytest.fixture  # implied by the tests below taking stale_lv as an argument
def stale_lv(tmp_storage):
    dev_size = 1 * 1024**3
    dev = tmp_storage.create_device(dev_size)

    vg_name = str(uuid.uuid4())
    good_lv_name = "good"
    stale_lv_name = "stale"

    lvm.set_read_only(False)

    # Create VG with 2 lvs.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    for lv_name in (good_lv_name, stale_lv_name):
        lvm.createLV(vg_name, lv_name, 128, activate=False)

    # Reload the cache.
    good_lv = lvm.getLV(vg_name, good_lv_name)
    stale_lv = lvm.getLV(vg_name, stale_lv_name)

    # Simulate removal of the second LV on another host, leaving stale LV in
    # the cache.
    commands.run([
        "lvremove",
        "--config", tmp_storage.lvm_config(),
        "{}/{}".format(vg_name, stale_lv_name),
    ])

    # The cache still keeps both lvs.
    assert lvm._lvminfo._lvs == {
        (vg_name, good_lv_name): good_lv,
        (vg_name, stale_lv_name): stale_lv,
    }

    return vg_name, good_lv_name, stale_lv_name
Example #7
def test_reload_lvs_with_stale_lv(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv1 = "lv1"
    lv2 = "lv2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the LVs.
    lvm.createLV(vg_name, lv1, 1024)
    lvm.createLV(vg_name, lv2, 1024)

    # Make sure that LVs are in the cache.
    expected_lv1 = lvm.getLV(vg_name, lv1)
    expected_lv2 = lvm.getLV(vg_name, lv2)

    # Simulate LV removed on the SPM while this host keeps it in the cache.
    commands.run([
        "lvremove", "-f", "--config",
        tmp_storage.lvm_config(), "{}/{}".format(vg_name, lv2)
    ])

    # Test removing stale LVs in LVMCache._reloadlvs(), which can be invoked
    # e.g. by calling lvm.getLV(vg_name).
    lvs = lvm.getLV(vg_name)

    # And verify that the first LV is still correctly reported.
    assert expected_lv1 in lvs
    assert expected_lv2 not in lvs
Example #8
    def resourceExists(self, resourceName):
        try:
            lvm.getLV(self._vg, resourceName)
            res = True
        except se.LogicalVolumeDoesNotExistError:
            res = False

        return res
Example #9
def test_lv_stale_cache_one(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Until cache is invalidated, return lvs from cache.

    good_lv = lvm.getLV(vg_name, good_lv_name)
    assert good_lv.name == good_lv_name

    stale_lv = lvm.getLV(vg_name, stale_lv_name)
    assert stale_lv.name == stale_lv_name
Example #10
def test_vg_invalidate(tmp_storage):
    dev_size = 1 * GiB

    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)

    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Reload cache.

    pv1 = lvm.getPV(dev1)
    vg1 = lvm.getVG(vg1_name)
    lv1 = lvm.getLV(vg1_name)[0]

    pv2 = lvm.getPV(dev2)
    vg2 = lvm.getVG(vg2_name)
    lv2 = lvm.getLV(vg2_name)[0]

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # Invalidate only the VG, keeping its LVs in the cache.
    lvm.invalidateVG(vg1_name, invalidateLVs=False)

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {
        vg1_name: lvm.Stale(vg1_name),
        vg2_name: vg2,
    }
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # getVGs() always reloads the cache.
    clear_stats()
    lvm.getVGs([vg1_name, vg2_name])
    check_stats(hits=0, misses=1)

    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
Example #11
def test_lv_stale_reload_one_stale(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Invalidate all lvs in single vg.
    lvm.invalidateVG(vg_name, invalidateLVs=True)

    # The good lv is still in the cache.
    lv = lvm.getLV(vg_name, good_lv_name)
    assert lv.name == good_lv_name

    # The stale lv should be removed.
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(vg_name, stale_lv_name)
Example #12
def test_lv_stale_reload_one_clear(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Drop all cache.
    lvm.invalidateCache()

    # The good lv is still in the cache.
    lv = lvm.getLV(vg_name, good_lv_name)
    assert lv.name == good_lv_name

    # The stale lv should be removed.
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(vg_name, stale_lv_name)
Example #13
def test_lv_create_remove(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_any = "lv-on-any-device"
    lv_specific = "lv-on-device-2"

    # Creating VG and LV requires read-write mode.
    lvm.set_read_only(False)
    lvm.createVG(vg_name, [dev1, dev2], "initial-tag", 128)

    # Create the first LV on any device.
    lvm.createLV(vg_name, lv_any, 1024)

    # Getting lv must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    lv = lvm.getLV(vg_name, lv_any)
    assert lv.name == lv_any
    assert lv.vg_name == vg_name
    assert int(lv.size) == 1024**3
    assert lv.tags == ()
    assert lv.writeable
    assert not lv.opened
    assert lv.active

    # The LV is typically created on dev1.
    device, extent = lvm.getFirstExt(vg_name, lv_any)
    assert device in (dev1, dev2)
    assert extent == "0"

    # Create the second LV on dev2 - requires read-write mode.
    lvm.set_read_only(False)
    lvm.createLV(vg_name, lv_specific, 1024, device=dev2)

    # Testing LV must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    device, extent = lvm.getFirstExt(vg_name, lv_specific)
    assert device == dev2

    # Remove both LVs - requires read-write mode.
    lvm.set_read_only(False)
    lvm.removeLVs(vg_name, [lv_any, lv_specific])

    # Testing if the lv exists must work in both read-only and read-write.
    lvm.set_read_only(read_only)
    for lv_name in (lv_any, lv_specific):
        with pytest.raises(se.LogicalVolumeDoesNotExistError):
            lvm.getLV(vg_name, lv_name)
Example #14
    def commit(self):
        lv = lvm.getLV(self.sd_manifest.sdUUID, self.vol_id)
        if sc.TEMP_VOL_LVTAG not in lv.tags:
            raise se.VolumeAlreadyExists("LV %r has already been committed" %
                                         self.vol_id)
        lvm.changeLVTags(self.sd_manifest.sdUUID, self.vol_id,
                         delTags=(sc.TEMP_VOL_LVTAG,))
Example #15
def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}
Example #16
def getVolumeTag(sdUUID, volUUID, tagPrefix):
    tags = lvm.getLV(sdUUID, volUUID).tags
    if sc.TAG_VOL_UNINIT in tags:
        log.warning("Reloading uninitialized volume %s/%s", sdUUID, volUUID)
        lvm.invalidateVG(sdUUID)
        tags = lvm.getLV(sdUUID, volUUID).tags
        if sc.TAG_VOL_UNINIT in tags:
            log.error("Found uninitialized volume: %s/%s", sdUUID, volUUID)
            raise se.VolumeDoesNotExist("%s/%s" % (sdUUID, volUUID))

    for tag in tags:
        if tag.startswith(tagPrefix):
            return tag[len(tagPrefix):]
    else:
        log.error("Missing tag %s in volume: %s/%s. tags: %s", tagPrefix,
                  sdUUID, volUUID, tags)
        raise se.MissingTagOnLogicalVolume(volUUID, tagPrefix)
Example #17
def test_lv_stale_cache_all(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Until cache is invalidated, return lvs from cache.

    lv_names = {lv.name for lv in lvm.getLV(vg_name)}
    assert good_lv_name in lv_names
    assert stale_lv_name in lv_names
Example #18
def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * 1024**3

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

        # Unused lvs should not be active.
        for vg_name in vgs:
            lv = lvm.getLV(vg_name, "unused")
            assert not lv.active
Example #19
def test_bootstrap(tmp_storage, read_only):
    dev_size = 20 * GiB

    lvm.set_read_only(False)

    dev1 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)

    dev2 = tmp_storage.create_device(dev_size)
    vg2_name = str(uuid.uuid4())
    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)

    vgs = (vg1_name, vg2_name)

    for vg_name in vgs:
        # Create active lvs.
        for lv_name in ("skip", "prepared", "opened", "unused"):
            lvm.createLV(vg_name, lv_name, 1024)

        # Create links to prepared lvs.
        img_dir = os.path.join(sc.P_VDSM_STORAGE, vg_name, "img")
        os.makedirs(img_dir)
        os.symlink(
            lvm.lvPath(vg_name, "prepared"),
            os.path.join(img_dir, "prepared"))

    # Open some lvs during bootstrap.
    vg1_opened = lvm.lvPath(vg1_name, "opened")
    vg2_opened = lvm.lvPath(vg2_name, "opened")
    with open(vg1_opened), open(vg2_opened):

        lvm.set_read_only(read_only)

        lvm.bootstrap(skiplvs=["skip"])

        # Lvs in skiplvs, prepared lvs, and opened lvs should be active.
        for vg_name in vgs:
            for lv_name in ("skip", "prepared", "opened"):
                lv = lvm.getLV(vg_name, lv_name)
                assert lv.active

        # Unused lvs should not be active.
        for vg_name in vgs:
            lv = lvm.getLV(vg_name, "unused")
            assert not lv.active
Example #20
def test_lv_stale_cache_all(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # LVs always skip the cache.
    # TODO: Use cache if VG did not change.

    lv_names = {lv.name for lv in lvm.getLV(vg_name)}
    assert good_lv_name in lv_names
    assert stale_lv_name not in lv_names
Example #21
@contextmanager  # implied by the bare yield below; import from contextlib
def delete_vol_tag(vol, tag_prefix):
    lv = lvm.getLV(vol.sdUUID, vol.volUUID)
    old_tags = {tag for tag in lv.tags if tag.startswith(tag_prefix)}

    lvm.changeLVsTags(vol.sdUUID, vol.volUUID, delTags=old_tags)
    try:
        yield
    finally:
        lvm.changeLVsTags(vol.sdUUID, vol.volUUID, addTags=old_tags)
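With the decorator restored, delete_vol_tag is used as a context manager: it strips the matching tags for the duration of the block and restores them on exit. A minimal usage sketch, assuming a volume object like the ones above (do_work is a hypothetical placeholder):

# Temporarily drop all tags starting with the prefix; they are restored on exit.
with delete_vol_tag(vol, sc.TAG_PREFIX_IMAGE):
    do_work(vol)  # hypothetical operation that must not see the tags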
Example #22
def test_lv_stale_reload_all_stale(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Invalidate all lvs in single vg.
    lvm.invalidateVG(vg_name, invalidateLVs=True)

    # Only the good lv is reported.
    lvs = [lv.name for lv in lvm.getLV(vg_name)]
    assert lvs == [good_lv_name]
Example #23
def test_lv_stale_reload_all_clear(stale_lv):
    vg_name, good_lv_name, stale_lv_name = stale_lv

    # Drop all cache.
    lvm.invalidateCache()

    # Only the good lv is reported.
    lvs = [lv.name for lv in lvm.getLV(vg_name)]
    assert lvs == [good_lv_name]
Example #24
    def validate(self):
        try:
            lv = lvm.getLV(self.sdUUID, self.volUUID)
        except se.LogicalVolumeDoesNotExistError:
            raise se.VolumeDoesNotExist(self.volUUID)
        else:
            if sc.TEMP_VOL_LVTAG in lv.tags:
                self.log.warning("Tried to produce a volume artifact: %s/%s",
                                 self.sdUUID, self.volUUID)
                raise se.VolumeDoesNotExist(self.volUUID)
        volume.VolumeManifest.validate(self)
Example #25
def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * 1024**3
Example #26
def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * 1024**3

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * 1024**3
Example #27
def test_lv_refresh(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())
    lv_fullname = "{}/{}".format(vg_name, lv_name)

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    lvm.set_read_only(read_only)

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Refreshing LV invalidates the cache to pick up changes from storage.
    lvm.refreshLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Simulate extending the LV on the SPM.
    commands.run([
        "lvextend",
        "--config", tmp_storage.lvm_config(),
        "-L+1g",
        lv_fullname
    ])

    # Activating an active LV refreshes it.
    lvm.activateLVs(vg_name, [lv_name])
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 3 * GiB
Example #28
def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)

    clear_stats()
    lv = lvm.getLV(vg_name)[0]
    check_stats(hits=0, misses=1)

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    clear_stats()
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=1)
Example #29
def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv1_name = str(uuid.uuid4())
    lv2_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv1_name, 1024, activate=False)
    lvm.createLV(vg_name, lv2_name, 1024, activate=False)

    lvm.changeLVsTags(vg_name, (lv1_name, lv2_name),
                      delTags=("initial-tag", ),
                      addTags=("new-tag-1", "new-tag-2"))

    lv1 = lvm.getLV(vg_name, lv1_name)
    lv2 = lvm.getLV(vg_name, lv2_name)
    assert sorted(lv1.tags) == ["new-tag-1", "new-tag-2"]
    assert sorted(lv2.tags) == ["new-tag-1", "new-tag-2"]
Example #30
def test_lv_extend_reduce(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    lvm.extendLV(vg_name, lv_name, 2048)

    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending the LV to the same size does nothing.

    lvm.extendLV(vg_name, lv_name, 2048)

    lvm.invalidateVG(vg_name)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Extending the LV to a smaller size does nothing.

    lvm.extendLV(vg_name, lv_name, 1024)

    lvm.invalidateVG(vg_name)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 2 * GiB

    # Reducing active LV requires force.
    lvm.reduceLV(vg_name, lv_name, 1024, force=True)
    lv = lvm.getLV(vg_name, lv_name)
    assert int(lv.size) == 1 * GiB
Example #31
@contextmanager  # implied by the bare yield below; import from contextlib
def change_vol_tag(vol, tag_prefix, tag_value):
    lv = lvm.getLV(vol.sdUUID, vol.volUUID)
    new_tags = {tag_prefix + tag_value}
    old_tags = {tag for tag in lv.tags if tag.startswith(tag_prefix)}

    lvm.changeLVsTags(vol.sdUUID,
                      vol.volUUID,
                      delTags=old_tags,
                      addTags=new_tags)
    try:
        yield
    finally:
        lvm.changeLVsTags(vol.sdUUID,
                          vol.volUUID,
                          delTags=new_tags,
                          addTags=old_tags)
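Usage mirrors delete_vol_tag: inside the with-block the volume carries the new tag, and the old tags are restored on exit. A minimal sketch with placeholder names (new_img_id and do_work are hypothetical):

# Temporarily point the image tag at another value, restoring it afterwards.
with change_vol_tag(vol, sc.TAG_PREFIX_IMAGE, new_img_id):
    do_work(vol)  # hypothetical operation that should see the new tag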
Example #32
    def changeVolumeTag(self, tagPrefix, uuid):

        if tagPrefix not in sc.VOLUME_TAGS:
            raise se.LogicalVolumeWrongTagError(tagPrefix)

        oldTag = ""
        for tag in lvm.getLV(self.sdUUID, self.volUUID).tags:
            if tag.startswith(tagPrefix):
                oldTag = tag
                break

        if not oldTag:
            raise se.MissingTagOnLogicalVolume(self.volUUID, tagPrefix)

        newTag = tagPrefix + uuid
        if oldTag != newTag:
            lvm.replaceLVTag(self.sdUUID, self.volUUID, oldTag, newTag)
Example #33
    @classmethod  # implied by the cls parameter
    def halfbakedVolumeRollback(cls, taskObj, sdUUID, volUUID, volPath):
        cls.log.info("sdUUID=%s volUUID=%s volPath=%s" %
                     (sdUUID, volUUID, volPath))
        try:
            # Fix me: assert resource lock.
            tags = lvm.getLV(sdUUID, volUUID).tags
        except se.LogicalVolumeDoesNotExistError:
            pass  # It's OK: inexistent LV, don't try to remove.
        else:
            if sc.TAG_VOL_UNINIT in tags:
                try:
                    lvm.removeLVs(sdUUID, (volUUID, ))
                except se.LogicalVolumeRemoveError as e:
                    cls.log.warning("Cannot remove logical volume: %s", e)
                if os.path.lexists(volPath):
                    cls.log.info("Unlinking half baked volume: %s", volPath)
                    os.unlink(volPath)
Example #34
def test_lv_rename(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name
Example #35
def test_lv_rename(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024)

    new_lv_name = "renamed-" + lv_name

    lvm.renameLV(vg_name, lv_name, new_lv_name)

    lv = lvm.getLV(vg_name, new_lv_name)
    assert lv.name == new_lv_name
Example #36
    @classmethod  # implied by the cls parameter
    def halfbakedVolumeRollback(cls, taskObj, sdUUID, volUUID, volPath):
        cls.log.info("sdUUID=%s volUUID=%s volPath=%s" %
                     (sdUUID, volUUID, volPath))
        try:
            # Fix me: assert resource lock.
            tags = lvm.getLV(sdUUID, volUUID).tags
        except se.LogicalVolumeDoesNotExistError:
            pass  # It's OK: inexistent LV, don't try to remove.
        else:
            if sc.TAG_VOL_UNINIT in tags:
                try:
                    lvm.removeLVs(sdUUID, volUUID)
                except se.CannotRemoveLogicalVolume as e:
                    cls.log.warning("Remove logical volume failed %s/%s %s",
                                    sdUUID, volUUID, str(e))

                if os.path.lexists(volPath):
                    cls.log.info("Unlinking half baked volume: %s", volPath)
                    os.unlink(volPath)
Example #37
    def _create_lv_artifact(self, parent, lv_size):
        try:
            lv = lvm.getLV(self.sd_manifest.sdUUID, self.vol_id)
        except se.LogicalVolumeDoesNotExistError:
            pass
        else:
            if sc.TEMP_VOL_LVTAG in lv.tags:
                raise se.DomainHasGarbage("Logical volume artifact %s exists" %
                                          self.vol_id)
            else:
                raise se.VolumeAlreadyExists("Logical volume %s exists" %
                                             self.vol_id)

        parent_vol_id = parent.vol_id if parent else sc.BLANK_UUID
        tags = (sc.TEMP_VOL_LVTAG,
                sc.TAG_PREFIX_PARENT + parent_vol_id,
                sc.TAG_PREFIX_IMAGE + self.img_id)
        lvm.createLV(self.sd_manifest.sdUUID, self.vol_id, lv_size,
                     activate=True, initialTags=tags)
Example #38
def test_lv_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())
    lv_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.createLV(vg_name, lv_name, 1024, activate=False)

    lvm.changeLVTags(
        vg_name,
        lv_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lv = lvm.getLV(vg_name, lv_name)
    assert sorted(lv.tags) == ["new-tag-1", "new-tag-2"]
Example #39
def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # As creating a block storage domain and a volume is quite time consuming,
    # we test several volume operations in one test to speed up the test suite.

    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects at least one pool to be attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            size=vol_size,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # test create volume
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset": ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 *
                   sd.LEASE_BLOCKS),
        "owners": [],
        "path": "/dev/{}/leases".format(sd_uuid),
        "version": None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # test volume prepare
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()

    assert os.path.exists(vol_path)

    # verify we can really write to and read from the image
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # test volume teardown
    vol.teardown(sd_uuid, vol_uuid)

    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # test also deleting of the volume
    vol.delete(postZero=False, force=False, discard=False)

    # verify the LV backing the volume is deleted
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

    # verify also that the metadata slot in the metadata lv is cleared
    data = dom.manifest.read_metadata_block(slot)
    assert data == b"\0" * sc.METADATA_SIZE
Example #40
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB
Example #41
    def is_garbage(self):
        try:
            lv = lvm.getLV(self.sd_manifest.sdUUID, self.vol_id)
        except se.LogicalVolumeDoesNotExistError:
            return False
        return sc.TEMP_VOL_LVTAG in lv.tags