Example #1
def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""
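
Every example on this page gets its block devices from a tmp_storage pytest fixture. The sketch below is a hypothetical stand-in rather than vdsm's actual fixture (which also sets up an LVM device filter and cleans up leftover VGs and PVs); it only illustrates one way such a fixture could hand out loop devices backed by sparse files, and it needs root to run.

import subprocess

import pytest


class TmpStorage:
    # Hypothetical helper backing each requested device with a sparse file
    # attached to a loop device.

    def __init__(self, tmp_path):
        self._tmp_path = tmp_path
        self._devices = []

    def create_device(self, size):
        backing = self._tmp_path / "backing-{}".format(len(self._devices))
        with open(backing, "w") as f:
            f.truncate(size)  # sparse file, no data written
        out = subprocess.run(
            ["losetup", "--find", "--show", str(backing)],
            check=True, capture_output=True, text=True)
        device = out.stdout.strip()
        self._devices.append(device)
        return device

    def close(self):
        for device in self._devices:
            subprocess.run(["losetup", "--detach", device], check=True)


@pytest.fixture
def tmp_storage(tmp_path):
    storage = TmpStorage(tmp_path)
    yield storage
    storage.close()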
Example #2
def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fails now after removing
    # the vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""
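
The read_only argument used by these tests comes from a fixture in the project's conftest. A minimal hypothetical stand-in (not the real conftest) that runs every test once in read-write mode and once in read-only mode could look like this:

import pytest


@pytest.fixture(params=[False, True], ids=["read-write", "read-only"])
def read_only(request):
    # Each test using this fixture is collected twice, once per mode.
    return request.param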
Example #3
def test_vg_invalidate(tmp_storage):
    dev_size = 1 * GiB

    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    vg1_name = str(uuid.uuid4())
    vg2_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg1_name, [dev1], "initial-tag", 128)
    lvm.createLV(vg1_name, "lv1", 128, activate=False)

    lvm.createVG(vg2_name, [dev2], "initial-tag", 128)
    lvm.createLV(vg2_name, "lv2", 128, activate=False)

    # Reload cache.

    pv1 = lvm.getPV(dev1)
    vg1 = lvm.getVG(vg1_name)
    lv1 = lvm.getLV(vg1_name)[0]

    pv2 = lvm.getPV(dev2)
    vg2 = lvm.getVG(vg2_name)
    lv2 = lvm.getLV(vg2_name)[0]

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # Invalidate only the VG, keeping its LVs.
    lvm.invalidateVG(vg1_name, invalidateLVs=False)

    assert lvm._lvminfo._pvs == {dev1: pv1, dev2: pv2}
    assert lvm._lvminfo._vgs == {
        vg1_name: lvm.Stale(vg1_name),
        vg2_name: vg2,
    }
    assert lvm._lvminfo._lvs == {
        (vg1_name, "lv1"): lv1,
        (vg2_name, "lv2"): lv2,
    }

    # getVGs() always reloads the cache.
    clear_stats()
    lvm.getVGs([vg1_name, vg2_name])
    check_stats(hits=0, misses=1)

    assert lvm._lvminfo._vgs == {vg1_name: vg1, vg2_name: vg2}
Example #4
def test_pv_stale_reload_one_clear(stale_pv):
    vg_name, good_pv_name, stale_pv_name = stale_pv

    # Drop all cache.
    lvm.invalidateCache()

    # The good pv is still in the cache.
    pv = lvm.getPV(good_pv_name)
    assert pv.name == good_pv_name

    # The stale pv should be removed.
    with pytest.raises(se.InaccessiblePhysDev):
        lvm.getPV(stale_pv_name)
Example #5
def test_pv_stale_reload_one_stale(stale_pv):
    vg_name, good_pv_name, stale_pv_name = stale_pv

    # Invalidate VG and its PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    # The good pv is still in the cache.
    pv = lvm.getPV(good_pv_name)
    assert pv.name == good_pv_name

    # Reloading the stale pv marks it as Unreadable.
    pv = lvm.getPV(stale_pv_name)
    assert pv == lvm.Unreadable(stale_pv_name)
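
The assertion above compares the reloaded PV against lvm.Unreadable(stale_pv_name) by value, and other examples compare cache entries against lvm.Stale(name) the same way. The sketch below illustrates that marker-object pattern with frozen dataclasses; it is only an illustration, and the real vdsm classes may be implemented differently.

from dataclasses import dataclass


@dataclass(frozen=True)
class Stale:
    # Cached entry was invalidated and must be reloaded from storage.
    name: str


@dataclass(frozen=True)
class Unreadable:
    # A reload was attempted but the device could not be read.
    name: str


assert Unreadable("/dev/mapper/a") == Unreadable("/dev/mapper/a")
assert Stale("vg1") != Unreadable("vg1")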
Example #6
def test_pv_stale_reload_one_stale(stale_pv):
    vg_name, good_pv_name, stale_pv_name = stale_pv

    # Invalidate VG and its PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    # The good pv is still in the cache.
    pv = lvm.getPV(good_pv_name)
    assert pv.name == good_pv_name

    # The stale pv should be removed.
    with pytest.raises(se.InaccessiblePhysDev):
        lvm.getPV(stale_pv_name)
Example #7
def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev, )
    assert vg.tags == ("initial-tag", )
    assert int(vg.extent_size) == 128 * MiB

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    pv = lvm.getPV(dev)

    lvm.set_read_only(read_only)

    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""
Example #8
def test_pv_stale_reload_invalidated(stale_pv):
    vg_name, good_pv_name, stale_pv_name = stale_pv

    # Invalidate the good pv.
    lvm._lvminfo._invalidatepvs(good_pv_name)
    # Reloading the good pv returns it as valid.
    pv = lvm.getPV(good_pv_name)
    assert pv.name == good_pv_name

    # Invalidate the stale pv.
    lvm._lvminfo._invalidatepvs(stale_pv_name)
    # Reloading the stale pv returns it as Unreadable.
    pv = lvm.getPV(stale_pv_name)
    assert pv == lvm.Unreadable(stale_pv_name)
Example #9
def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}
Example #10
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev1, )

    lvm.extendVG(vg_name, [dev2, dev3], force=False)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
Example #11
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
Example #12
def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev, )
    assert vg.tags == ("initial-tag", )
    assert int(vg.extent_size) == 128 * 1024**2

    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fails now after removing
    # the vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""
Example #13
def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev,)
    assert vg.tags == ("initial-tag",)
    assert int(vg.extent_size) == 128 * 1024**2

    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fails now after removing
    # the vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""
Example #14
def test_vg_invalidate_lvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)

    clear_stats()
    lv = lvm.getLV(vg_name)[0]
    check_stats(hits=0, misses=1)

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs.
    lvm.invalidateVG(vg_name)

    assert lvm._lvminfo._pvs == {dev: pv}
    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}

    # Accessing LVs always accesses storage.
    # TODO: Use cache if VG did not change.
    clear_stats()
    lvm.getLV(vg_name)
    check_stats(hits=0, misses=1)
Example #15
def test_vg_invalidate_lvs_pvs(tmp_storage):
    dev_size = 1 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)
    lvm.createLV(vg_name, "lv1", 128, activate=False)

    # Reload cache.
    pv = lvm.getPV(dev)
    vg = lvm.getVG(vg_name)
    lv = lvm.getLV(vg_name)[0]

    assert lvm._lvminfo._pvs == {dev: pv}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() first finds the VG using getVG(), so there is a cache hit.
    # No stale PVs for the VG so getPVs() will have another cache hit.
    check_stats(hits=2, misses=0)

    assert lvm._lvminfo._vgs == {vg_name: vg}
    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lv}

    # Invalidate VG including LVs and PVs.
    lvm.invalidateVG(vg_name, invalidatePVs=True)

    assert lvm._lvminfo._vgs == {vg_name: lvm.Stale(vg_name)}
    assert lvm._lvminfo._pvs == {dev: lvm.Stale(dev)}

    clear_stats()
    lvm._lvminfo.getPvs(vg_name)
    # getPVs() will not find the invalidated VG in cache, so there is a miss.
    # There are stale PVs for the VG so getPVs() will have another cache miss.
    check_stats(hits=0, misses=2)

    assert lvm._lvminfo._lvs == {(vg_name, "lv1"): lvm.Stale("lv1")}
Example #16
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # Calling getVG() again will get a cache hit.
    lvm.getVG(vg_name)
    check_stats(hits=1, misses=1)

    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)

    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after extendVG() does not use the cache.
    # This happens because extendVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    clear_stats()
    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    check_stats(hits=0, misses=1)

    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        clear_stats()
        pv = lvm.getPV(dev)
        check_stats(hits=0, misses=1)

        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after reduceVG() does not use the cache.
    # This happens because reduceVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)

    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)
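
For orientation, the createVG/extendVG/reduceVG/removeVG wrappers exercised above map onto the standard LVM command-line tools. A rough subprocess equivalent is sketched below; it ignores the extra options vdsm passes (device filters, metadata sizes, tags) and uses placeholder device paths.

import subprocess


def lvm_cli_roundtrip(vg_name, dev1, dev2, dev3):
    # Roughly what the wrappers above do, driven through the LVM CLI.
    subprocess.run(["vgcreate", vg_name, dev1], check=True)        # createVG
    subprocess.run(["vgextend", vg_name, dev2, dev3], check=True)  # extendVG
    subprocess.run(["vgreduce", vg_name, dev2], check=True)        # reduceVG
    subprocess.run(["vgremove", "-f", vg_name], check=True)        # removeVG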
Example #17
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * GiB)
    dev2 = tmp_storage.create_device(10 * GiB)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Also test the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * MiB

    # Test the domain lease.
    lease = dom.getClusterLease()
    assert lease.name == "SDM"
    assert lease.path == "/dev/{}/leases".format(dom.sdUUID)
    assert lease.offset == dom.alignment

    resource = fake_sanlock.read_resource(
        lease.path,
        lease.offset,
        align=dom.alignment,
        sector=dom.block_size)

    assert resource == {
        "acquired": False,
        "align": dom.alignment,
        "lockspace": dom.sdUUID.encode("utf-8"),
        "resource": lease.name.encode("utf-8"),
        "sector": dom.block_size,
        "version": 0,
    }

    # Test special volume sizes.

    for name in (sd.IDS, sd.INBOX, sd.OUTBOX, sd.METADATA):
        lv = lvm.getLV(dom.sdUUID, name)
        # This is the minimal LV size on block storage.
        assert int(lv.size) == 128 * MiB

    lv = lvm.getLV(dom.sdUUID, blockSD.MASTERLV)
    assert int(lv.size) == GiB

    lv = lvm.getLV(dom.sdUUID, sd.LEASES)
    assert int(lv.size) == sd.LEASES_SLOTS * dom.alignment

    if domain_version > 3:
        lv = lvm.getLV(dom.sdUUID, sd.XLEASES)
        assert int(lv.size) == sd.XLEASES_SLOTS * dom.alignment
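
The 'pecount': '77' values in the expected metadata above follow from the device and extent sizes: each 10 GiB PV holds 80 extents of 128 MiB, a few of which are lost to the space LVM reserves at the start of the PV. The 3-extent overhead below is an assumption about the PV layout, not a figure taken from the source.

GiB = 1024**3
MiB = 1024**2

dev_size = 10 * GiB
extent_size = 128 * MiB

total_extents = dev_size // extent_size   # 80 extents per 10 GiB device
overhead_extents = 3                      # assumed: PV metadata areas + alignment
usable_extents = total_extents - overhead_extents
assert usable_extents == 77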
Example #18
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
        lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Also test the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB
Example #19
def test_create_domain_metadata(tmp_storage, tmp_repo, domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    lease = sd.DEFAULT_LEASE_PARAMS
    assert dom.getMetadata() == {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
        lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,
        blockSD.DMDK_LOGBLKSIZE: sc.BLOCK_SIZE_512,
        blockSD.DMDK_PHYBLKSIZE: sc.BLOCK_SIZE_512,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB
Example #20
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB