Example No. 1
def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (the metadata pv) will have both of its metadata areas in use.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fails now after removing
    # the vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == ""
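
The read_only fixture used by several of these tests is not shown in this excerpt. A minimal sketch of what it presumably provides, so the test above runs once per cache mode (hypothetical; the real conftest may differ):

import pytest

@pytest.fixture(params=[True, False], ids=["read-only", "read-write"])
def read_only(request):
    return request.param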
Example No. 2
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * 1024**3
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # The first pv (the metadata pv) will have both of its metadata areas in use.
    pv = lvm.getPV(dev1)
    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        pv = lvm.getPV(dev)
        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    vg = lvm.getVG(vg_name)
    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
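
lvm.extendVG() and lvm.reduceVG() presumably drive LVM's command line. A rough sketch of the equivalent calls, assuming the wrappers shell out to vgextend/vgreduce (option handling omitted):

import subprocess

def extend_vg(vg_name, devices):
    # Roughly lvm.extendVG(vg_name, devices, force=False).
    subprocess.run(["vgextend", vg_name] + list(devices), check=True)

def reduce_vg(vg_name, device):
    # Roughly lvm.reduceVG(vg_name, device).
    subprocess.run(["vgreduce", vg_name, device], check=True)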
Example No. 3
def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert vg.pv_name == (dev,)
    assert vg.tags == ("initial-tag",)
    assert int(vg.extent_size) == 128 * 1024**2

    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    # TODO: check this also in read-only mode. vgs fails now after removing
    # the vg, and this causes 10 retries that take 15 seconds.

    # We remove the VG
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)

    # But keep the PVs, not sure why.
    pv = lvm.getPV(dev)
    assert pv.name == dev
    assert pv.vg_name == ""
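
Note the unit conversion implied by the extent size assertion above: createVG() takes the extent size in megabytes, while getVG() reports vg.extent_size in bytes:

# The 128 passed to createVG() is in MiB; vg.extent_size is in bytes.
assert 128 * 1024**2 == 134217728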
Example No. 4
def test_vg_add_delete_tags(tmp_storage):
    dev_size = 20 * 1024**3
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.changeVGTags(
        vg_name,
        delTags=("initial-tag",),
        addTags=("new-tag-1", "new-tag-2"))

    lvm.changeVGTags(
        vg_name,
        delTags=["initial-tag"],
        addTags=["new-tag-1", "new-tag-2"])

    vg = lvm.getVG(vg_name)
    assert sorted(vg.tags) == ["new-tag-1", "new-tag-2"]
Example No. 5
def test_retry_with_wider_filter(tmp_storage):
    lvm.set_read_only(False)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    lvm.getAllPVs()

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * 1024**3)

    # We run vgcreate with explicit devices argument, so the filter is correct
    # and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs vg-name" fails because of the stale filter, so we
    # invalidate the filter and run it again.

    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev,)
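
The retry this test exercises can be summarized as: if "vgs" fails under a stale cached device filter, invalidate the filter and run the command once more. A conceptual sketch with hypothetical names, not vdsm's actual code:

class CommandFailed(Exception):
    pass

def get_vg_with_retry(vg_name, run_vgs, invalidate_filter):
    try:
        return run_vgs(vg_name)       # fails if the cached filter is stale
    except CommandFailed:
        invalidate_filter()           # widen the filter to see new devices
        return run_vgs(vg_name)       # retry with the fresh filter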
Example No. 6
def test_vg_add_delete_tags(tmp_storage):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.changeVGTags(vg_name,
                     delTags=("initial-tag", ),
                     addTags=("new-tag-1", "new-tag-2"))

    lvm.changeVGTags(vg_name,
                     delTags=["initial-tag"],
                     addTags=["new-tag-1", "new-tag-2"])

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    assert sorted(vg.tags) == ["new-tag-1", "new-tag-2"]
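
clear_stats() and check_stats() are test helpers not shown in this excerpt. A plausible sketch, assuming the lvm module exposes hit/miss counters for its cache (both the counter API and its shape are assumptions):

def clear_stats():
    # Assumed counter API: reset the lvm cache statistics.
    lvm.cache_stats().clear()

def check_stats(hits, misses):
    # Assumed counter API: compare cache statistics with expected values.
    stats = lvm.cache_stats()
    assert stats.hits == hits
    assert stats.misses == misses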
Example No. 7
def test_retry_with_wider_filter(tmp_storage, read_only):
    lvm.set_read_only(read_only)

    # Force reload of the cache. The system does not know about any device at
    # this point.
    clear_stats()
    lvm.getAllPVs()
    check_stats(hits=0, misses=1)

    # Create a device - this device is not in the lvm cached filter yet.
    dev = tmp_storage.create_device(20 * GiB)

    # Creating VG requires read-write mode.
    lvm.set_read_only(False)

    # We run vgcreate with explicit devices argument, so the filter is correct
    # and it succeeds.
    vg_name = str(uuid.uuid4())
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    # Calling getAllPVs() has a cache miss since createVG invalidates the PVs.
    clear_stats()
    lvm.getAllPVs()
    check_stats(hits=0, misses=1)

    # A second call to getAllPVs() adds a cache hit since the new PV was reloaded.
    lvm.getAllPVs()
    check_stats(hits=1, misses=1)

    # Checking VG must work in both read-only and read-write modes.
    lvm.set_read_only(read_only)

    # The cached filter is stale at this point, and so is the vg metadata in
    # the cache. Running "vgs vg-name" fails because of the stale filter, so we
    # invalidate the filter and run it again.
    vg = lvm.getVG(vg_name)
    assert vg.pv_name == (dev,)
Example No. 8
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
        lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Also test the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB
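
The 'pecount': '77' values in the expected metadata above follow from the device and extent sizes; a quick check of the arithmetic (the 3-extent overhead is inferred from the expected values, not from vdsm's code):

GiB = 1024**3
MiB = 1024**2

dev_size = 10 * GiB
extent_size = 128 * MiB          # the 128 passed to createVG, in MiB

raw_extents = dev_size // extent_size
assert raw_extents == 80
# The expected metadata reports 77 extents per PV, so about 3 extents
# (384 MiB) are consumed by PV metadata areas and alignment.
assert raw_extents - 77 == 3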
Example No. 9
def test_create_volume(monkeypatch, tmp_storage, tmp_repo, fake_access,
                       fake_rescan, tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects at least one pool to be attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(imgUUID=img_uuid,
                         size=vol_size,
                         volFormat=sc.COW_FORMAT,
                         preallocate=sc.SPARSE_VOL,
                         diskType=image.DISK_TYPES[image.DATA_DISK_TYPE],
                         volUUID=vol_uuid,
                         desc=vol_desc,
                         srcImgUUID=sc.BLANK_UUID,
                         srcVolUUID=sc.BLANK_UUID)

    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset":
        ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 * sd.LEASE_BLOCKS),
        "owners": [],
        "path":
        "/dev/{}/leases".format(sd_uuid),
        "version":
        None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid
Example No. 10
def test_create_domain_metadata(tmp_storage, tmp_repo, domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    lease = sd.DEFAULT_LEASE_PARAMS
    assert dom.getMetadata() == {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
        lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,
        blockSD.DMDK_LOGBLKSIZE: sc.BLOCK_SIZE_512,
        blockSD.DMDK_PHYBLKSIZE: sc.BLOCK_SIZE_512,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # Check that the first PV is the device where metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB
Example No. 11
def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock, domain_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        capacity=10 * GiB,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    if domain_version < 5:
        assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4
    else:
        assert offset == (blockSD.METADATA_BASE_V5 + slot *
                          blockSD.METADATA_SLOT_SIZE_V5)

    meta_path = dom.manifest.metadata_volume_path()

    # Check capacity
    assert 10 * GiB == vol.getCapacity()
    vol.setCapacity(0)
    with pytest.raises(se.MetaDataValidationError):
        vol.getCapacity()
    vol.setCapacity(10 * GiB)

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(domain_version)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(domain_version, CAP=md.capacity)
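
The offset logic verified above, condensed into one function (a direct restatement of the assertions, reusing the same blockSD constants):

def expected_metadata_offset(slot, domain_version):
    # v4: slots start at the base of the metadata LV.
    if domain_version < 5:
        return slot * blockSD.METADATA_SLOT_SIZE_V4
    # v5: slots start after a fixed header area.
    return blockSD.METADATA_BASE_V5 + slot * blockSD.METADATA_SLOT_SIZE_V5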
Example No. 12
def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock,
                           domain_version):
    # as creation of block storage domain and volume is quite time consuming,
    # we test several volume operations in one test to speed up the test suite

    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * GiB
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects at least one pool to be attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            capacity=vol_capacity,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # test create volume
    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Get the metadata slot, used for volume metadata and volume lease offset.
    _, slot = vol.getMetadataId()

    lease = dom.getVolumeLease(img_uuid, vol_uuid)

    assert lease.name == vol.volUUID
    assert lease.path == "/dev/{}/leases".format(sd_uuid)
    assert lease.offset == (blockSD.RESERVED_LEASES + slot) * dom.alignment

    # Test that we created a sanlock resource for this volume.
    resource = fake_sanlock.read_resource(
        lease.path,
        lease.offset,
        align=dom.alignment,
        sector=dom.block_size)

    assert resource == {
        "acquired": False,
        "align": dom.alignment,
        "lockspace": vol.sdUUID.encode("utf-8"),
        "resource": vol.volUUID.encode("utf-8"),
        "sector": dom.block_size,
        "version": 0,
    }

    # Test volume info.
    actual = vol.getInfo()

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == {
        "offset": lease.offset,
        "owners": [],
        "path": lease.path,
        "version": None,
    }
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == sc.VOL_STATUS_OK
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # test volume prepare
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    lv_size = int(lvm.getLV(sd_uuid, vol_uuid).size)

    # Check volume size of unprepared volume - uses lvm.
    size = dom.getVolumeSize(img_uuid, vol_uuid)
    assert size.apparentsize == size.truesize == lv_size

    vol.prepare()

    assert os.path.exists(vol_path)

    # Check volume size of prepared volume - uses seek.
    size = dom.getVolumeSize(img_uuid, vol_uuid)
    assert size.apparentsize == size.truesize == lv_size

    # verify we can really write and read to an image
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # test volume teardown
    vol.teardown(sd_uuid, vol_uuid)

    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # test also deleting of the volume
    vol.delete(postZero=False, force=False, discard=False)

    # verify the lv backing the volume is deleted
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)
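
The prepare/teardown assertions above rely on a stdlib subtlety: for a broken symlink, os.path.islink() is True (it uses lstat) while os.path.exists() is False (it follows the link). A standalone demonstration:

import os
import tempfile

d = tempfile.mkdtemp()
link = os.path.join(d, "lv")
os.symlink(os.path.join(d, "does-not-exist"), link)

assert os.path.islink(link)        # the link itself exists...
assert not os.path.exists(link)    # ...but its target does not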
Example No. 13
def test_dump_sd_metadata(monkeypatch, tmp_storage, tmp_repo, fake_sanlock,
                          fake_task, domain_version):

    # Allow dumping the fake sanlock lease information created during the test.
    monkeypatch.setattr(sanlock_direct, "dump_leases",
                        fake_sanlock.dump_leases)
    monkeypatch.setattr(sanlock_direct, "dump_lockspace",
                        fake_sanlock.dump_lockspace)

    sd_uuid = str(uuid.uuid4())
    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="test",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN)
    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    md_dev = os.path.basename(dev)
    expected_metadata = {
        'uuid': sd_uuid,
        'type': 'ISCSI',
        'class': 'Data',
        'name': 'test',
        'role': sd.REGULAR_DOMAIN,
        'pool': [tmp_repo.pool_id],
        'version': str(domain_version),
        'block_size': sc.BLOCK_SIZE_512,
        'alignment': sc.ALIGNMENT_1M,
        'vguuid': vg.uuid,
        'state': 'OK',
        'metadataDevice': md_dev,
        'vgMetadataDevice': md_dev
    }

    expected_sd_lease = {
        'offset': sc.ALIGNMENT_1M,
        'lockspace': sd_uuid,
        'resource': 'SDM',
        'timestamp': 0,
        'own': 0,
        'gen': 0,
        'lver': 0
    }

    # A real sanlock lockspace dump would have a unique host name for the
    # resource field and a valid lock creation timestamp, owner host id and
    # generation. For the fake sanlock dump we only dump what fake sanlock
    # registers for test purposes.
    expected_lockspace = [{
        'offset': 0,
        'lockspace': sd_uuid,
        'resource': 0,
        'timestamp': 0,
        'own': 0,
        'gen': 0
    }]

    assert dom.dump() == {"metadata": expected_metadata, "volumes": {}}

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * GiB
    vol_ctime = 1582196150

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: vol_ctime)
        dom.createVolume(diskType=sc.DATA_DISKTYPE,
                         imgUUID=img_uuid,
                         preallocate=sc.SPARSE_VOL,
                         desc="test",
                         capacity=vol_capacity,
                         srcImgUUID=sc.BLANK_UUID,
                         srcVolUUID=sc.BLANK_UUID,
                         volFormat=sc.COW_FORMAT,
                         volUUID=vol_uuid)

    # Create external lease.
    dom.create_lease(vol_uuid)

    lease_info = dom._manifest.lease_info(vol_uuid)
    expected_xleases = {
        vol_uuid: {
            "offset": lease_info.offset,
            "updating": False
        }
    }

    vol = dom.produceVolume(img_uuid, vol_uuid)
    mdslot = vol.getMetaSlot()
    vol_size = dom.getVolumeSize(img_uuid, vol_uuid)
    expected_volumes_metadata = {
        vol_uuid: {
            'apparentsize': vol_size.apparentsize,
            'capacity': vol_capacity,
            'ctime': vol_ctime,
            'description': 'test',
            'disktype': sc.DATA_DISKTYPE,
            'format': 'COW',
            'generation': 0,
            'image': img_uuid,
            'legality': sc.LEGAL_VOL,
            'mdslot': mdslot,
            'status': sc.VOL_STATUS_OK,
            'parent': sc.BLANK_UUID,
            'type': 'SPARSE',
            'voltype': 'LEAF',
            'truesize': vol_size.truesize
        }
    }

    expected_vol_lease = {
        'offset': (sd.RESERVED_LEASES + mdslot) * sc.ALIGNMENT_1M,
        'lockspace': sd_uuid,
        'resource': vol_uuid,
        'timestamp': 0,
        'own': 0,
        'gen': 0,
        'lver': 0
    }

    assert dom.dump(full=True) == {
        "metadata": expected_metadata,
        "volumes": expected_volumes_metadata,
        "leases": [expected_sd_lease, expected_vol_lease],
        "lockspace": expected_lockspace,
        "xleases": expected_xleases
    }

    assert dom.dump() == {
        "metadata": expected_metadata,
        "volumes": expected_volumes_metadata
    }

    # Uninitialized volume is excluded from dump.
    with change_vol_tag(vol, "", sc.TAG_VOL_UNINIT):
        assert dom.dump() == {"metadata": expected_metadata, "volumes": {}}

    # A volume tagged as removed is dumped with removed status.
    img_tag = sc.REMOVED_IMAGE_PREFIX + img_uuid
    with change_vol_tag(vol, sc.TAG_PREFIX_IMAGE, img_tag):
        assert dom.dump() == {
            "metadata": expected_metadata,
            "volumes": {
                vol_uuid: {
                    'apparentsize': vol_size.apparentsize,
                    'capacity': vol_capacity,
                    'ctime': vol_ctime,
                    'description': 'test',
                    'disktype': sc.DATA_DISKTYPE,
                    'format': 'COW',
                    'generation': 0,
                    'image': img_uuid,
                    'legality': sc.LEGAL_VOL,
                    'mdslot': mdslot,
                    'status': sc.VOL_STATUS_REMOVED,
                    'parent': sc.BLANK_UUID,
                    'type': 'SPARSE',
                    'voltype': 'LEAF',
                    'truesize': vol_size.truesize
                }
            }
        }

    # A volume tagged as zeroed is dumped with removed status.
    img_tag = sc.ZEROED_IMAGE_PREFIX + img_uuid
    with change_vol_tag(vol, sc.TAG_PREFIX_IMAGE, img_tag):
        assert dom.dump() == {
            "metadata": expected_metadata,
            "volumes": {
                vol_uuid: {
                    'apparentsize': vol_size.apparentsize,
                    'capacity': vol_capacity,
                    'ctime': vol_ctime,
                    'description': 'test',
                    'disktype': sc.DATA_DISKTYPE,
                    'format': 'COW',
                    'generation': 0,
                    'image': img_uuid,
                    'legality': sc.LEGAL_VOL,
                    'mdslot': mdslot,
                    'status': sc.VOL_STATUS_REMOVED,
                    'parent': sc.BLANK_UUID,
                    'type': 'SPARSE',
                    'voltype': 'LEAF',
                    'truesize': vol_size.truesize
                }
            }
        }

    # A volume with a bad MD slot tag will be dumped with invalid status.
    with change_vol_tag(vol, sc.TAG_PREFIX_MD, "bad-slot-number"):
        assert dom.dump() == {
            "metadata": expected_metadata,
            "volumes": {
                vol_uuid: {
                    "apparentsize": vol_size.apparentsize,
                    "image": img_uuid,
                    "status": sc.VOL_STATUS_INVALID,
                    "parent": sc.BLANK_UUID,
                    "truesize": vol_size.truesize
                }
            }
        }

    # A volume whose size query fails will be dumped with invalid status.
    with monkeypatch.context() as mc:

        def bad_vol_size(*args, **kwargs):
            # Replaces a bound method, so it must accept any arguments.
            raise Exception()

        mc.setattr(blockSD.BlockStorageDomainManifest, "getVolumeSize",
                   bad_vol_size)

        assert dom.dump() == {
            "metadata": expected_metadata,
            "volumes": {
                vol_uuid: {
                    'capacity': vol_capacity,
                    'ctime': vol_ctime,
                    'description': 'test',
                    'disktype': sc.DATA_DISKTYPE,
                    'format': 'COW',
                    'generation': 0,
                    'image': img_uuid,
                    'legality': sc.LEGAL_VOL,
                    'mdslot': mdslot,
                    'status': sc.VOL_STATUS_INVALID,
                    'parent': sc.BLANK_UUID,
                    'type': 'SPARSE',
                    'voltype': 'LEAF'
                }
            }
        }

    # Remove volume metadata.
    vol.removeMetadata((sd_uuid, mdslot))

    # The volume must be INVALID after its metadata is removed, but image
    # uuid, parent uuid and mdslot can still be reported from the lv tags.
    assert dom.dump() == {
        "metadata": expected_metadata,
        "volumes": {
            vol_uuid: {
                "apparentsize": vol_size.apparentsize,
                "image": img_uuid,
                "status": sc.VOL_STATUS_INVALID,
                "parent": sc.BLANK_UUID,
                "mdslot": mdslot,
                "truesize": vol_size.truesize
            }
        }
    }

    # If the image tag is missing, the image key is omitted.
    with delete_vol_tag(vol, sc.TAG_PREFIX_IMAGE):
        assert dom.dump() == {
            "metadata": expected_metadata,
            "volumes": {
                vol_uuid: {
                    "status": sc.VOL_STATUS_INVALID,
                    "parent": sc.BLANK_UUID,
                    "mdslot": mdslot,
                }
            }
        }
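
change_vol_tag() and delete_vol_tag() are test helpers not shown in this excerpt. A plausible reconstruction of the first (hypothetical; the tag-manipulation call and its signature are assumptions):

from contextlib import contextmanager

@contextmanager
def change_vol_tag(vol, prefix, new_tag):
    # Swap the lv tag starting with `prefix` for `new_tag`, restoring
    # the original tag on exit.
    lv = lvm.getLV(vol.sdUUID, vol.volUUID)
    old_tag = next(t for t in lv.tags if t.startswith(prefix))
    lvm.changeLVTags(vol.sdUUID, vol.volUUID,
                     delTags=(old_tag,), addTags=(new_tag,))
    try:
        yield
    finally:
        lvm.changeLVTags(vol.sdUUID, vol.volUUID,
                         delTags=(new_tag,), addTags=(old_tag,))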
Example No. 14
def test_create_snapshot_size(tmp_storage, tmp_repo, fake_access, fake_rescan,
                              tmp_db, fake_task, fake_sanlock, domain_version):
    # This test was added to verify the fix for https://bugzilla.redhat.com/1700623
    # As a result of this bug, there can be volumes with corrupted metadata
    # capacity. The metadata of such a volume should be fixed when the volume
    # is prepared. As the creation of tmp storage for block SD is time
    # consuming, let's test this flow also in this test.
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="domain",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    parent_vol_capacity = constants.GIB
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * parent_vol_capacity

    # Create parent volume.

    dom.createVolume(imgUUID=img_uuid,
                     size=parent_vol_capacity // sc.BLOCK_SIZE_512,
                     volFormat=sc.RAW_FORMAT,
                     preallocate=sc.PREALLOCATED_VOL,
                     diskType='DATA',
                     volUUID=parent_vol_uuid,
                     desc="Test parent volume",
                     srcImgUUID=sc.BLANK_UUID,
                     srcVolUUID=sc.BLANK_UUID,
                     initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Verify that snapshot cannot be smaller than parent.

    with pytest.raises(se.InvalidParameterException):
        dom.createVolume(imgUUID=img_uuid,
                         size=parent_vol.getSize() - 1,
                         volFormat=sc.COW_FORMAT,
                         preallocate=sc.SPARSE_VOL,
                         diskType='DATA',
                         volUUID=vol_uuid,
                         desc="Extended volume",
                         srcImgUUID=parent_vol.imgUUID,
                         srcVolUUID=parent_vol.volUUID,
                         initialSize=None)

    # Verify that snapshot can be bigger than parent.

    dom.createVolume(imgUUID=img_uuid,
                     size=vol_capacity // sc.BLOCK_SIZE_512,
                     volFormat=sc.COW_FORMAT,
                     preallocate=sc.SPARSE_VOL,
                     diskType='DATA',
                     volUUID=vol_uuid,
                     desc="Extended volume",
                     srcImgUUID=parent_vol.imgUUID,
                     srcVolUUID=parent_vol.volUUID,
                     initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == parent_vol_capacity

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

    # Now test the flow in which metadata capacity is corrupted.
    # Corrupt the metadata capacity manually.
    md = vol.getMetadata()
    md.capacity = vol_capacity // 2
    vol.setMetadata(md)

    # During preparation of the volume, metadata capacity should be fixed.
    vol.prepare()

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity
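
The fix verified here can be summarized as: during prepare, the volume's real virtual size is read back from the image, and corrupted metadata capacity is rewritten. A conceptual sketch, not vdsm's exact code (the qemuimg.info() call and key name are assumptions):

def fix_capacity_on_prepare(vol, vol_path):
    info = qemuimg.info(vol_path)            # assumed qemu-img wrapper
    actual_capacity = info["virtualsize"]    # assumed key name
    md = vol.getMetadata()
    if md.capacity != actual_capacity:
        # Repair corrupted metadata before exposing the volume.
        md.capacity = actual_capacity
        vol.setMetadata(md)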
Example No. 15
def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # as creation of block storage domain and volume is quite time consuming,
    # we test several volume operations in one test to speed up the test suite

    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects at least one pool to be attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(
            imgUUID=img_uuid,
            size=vol_size,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType=sc.DATA_DISKTYPE,
            volUUID=vol_uuid,
            desc=vol_desc,
            srcImgUUID=sc.BLANK_UUID,
            srcVolUUID=sc.BLANK_UUID)

    # test create volume
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset": ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 *
                   sd.LEASE_BLOCKS),
        "owners": [],
        "path": "/dev/{}/leases".format(sd_uuid),
        "version": None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # test volume prepare
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()

    assert os.path.exists(vol_path)

    # verify we can really write and read to an image
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # test volume teardown
    vol.teardown(sd_uuid, vol_uuid)

    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # test also deleting of the volume
    vol.delete(postZero=False, force=False, discard=False)

    # verify the lv backing the volume is deleted
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)

    # verify also that the volume's metadata block in the metadata lv is cleared
    data = dom.manifest.read_metadata_block(slot)
    assert data == b"\0" * sc.METADATA_SIZE
Example No. 16
def test_vg_create_multiple_devices(tmp_storage, read_only):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev1, dev2, dev3], "initial-tag", 128)

    lvm.set_read_only(read_only)

    vg = lvm.getVG(vg_name)
    assert vg.name == vg_name
    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # The first pv (the metadata pv) will have both of its metadata areas in use.
    clear_stats()
    pv = lvm.getPV(dev1)
    check_stats(hits=0, misses=1)

    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        clear_stats()
        pv = lvm.getPV(dev)
        check_stats(hits=0, misses=1)

        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    for dev in dev1, dev2, dev3:
        clear_stats()
        pv = lvm.getPV(dev)
        check_stats(hits=0, misses=1)

        assert pv.name == dev
        assert pv.vg_name == ""
Example No. 17
def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=4,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(
        desc="old description",
        diskType="DATA",
        imgUUID=img_uuid,
        preallocate=sc.SPARSE_VOL,
        size=10 * 1024**3,
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        volFormat=sc.COW_FORMAT,
        volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4

    meta_path = dom.manifest.metadata_volume_path()

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(4)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path, "rb") as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip(b"\0")
    assert data == md.storage_format(4, CAP=md.capacity)
Example No. 18
def test_create_snapshot_size(
        tmp_storage, tmp_repo, fake_access, fake_rescan, tmp_db, fake_task,
        fake_sanlock, domain_version):
    # This test was added to verify the fix for https://bugzilla.redhat.com/1700623
    # As a result of this bug, there can be volumes with corrupted metadata
    # capacity. The metadata of such a volume should be fixed when the volume
    # is prepared. As the creation of tmp storage for block SD is time
    # consuming, let's test this flow also in this test.
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024 ** 3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName="domain",
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    parent_vol_uuid = str(uuid.uuid4())
    parent_vol_capacity = constants.GIB
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * parent_vol_capacity

    # Create parent volume.

    dom.createVolume(
        imgUUID=img_uuid,
        size=parent_vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.RAW_FORMAT,
        preallocate=sc.PREALLOCATED_VOL,
        diskType='DATA',
        volUUID=parent_vol_uuid,
        desc="Test parent volume",
        srcImgUUID=sc.BLANK_UUID,
        srcVolUUID=sc.BLANK_UUID,
        initialSize=None)

    parent_vol = dom.produceVolume(img_uuid, parent_vol_uuid)

    # Verify that snapshot cannot be smaller than parent.

    with pytest.raises(se.InvalidParameterException):
        dom.createVolume(
            imgUUID=img_uuid,
            size=parent_vol.getSize() - 1,
            volFormat=sc.COW_FORMAT,
            preallocate=sc.SPARSE_VOL,
            diskType='DATA',
            volUUID=vol_uuid,
            desc="Extended volume",
            srcImgUUID=parent_vol.imgUUID,
            srcVolUUID=parent_vol.volUUID,
            initialSize=None)

    # Verify that snapshot can be bigger than parent.

    dom.createVolume(
        imgUUID=img_uuid,
        size=vol_capacity // sc.BLOCK_SIZE_512,
        volFormat=sc.COW_FORMAT,
        preallocate=sc.SPARSE_VOL,
        diskType='DATA',
        volUUID=vol_uuid,
        desc="Extended volume",
        srcImgUUID=parent_vol.imgUUID,
        srcVolUUID=parent_vol.volUUID,
        initialSize=None)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Verify volume sizes obtained from metadata
    actual_parent = parent_vol.getInfo()
    assert int(actual_parent["capacity"]) == parent_vol_capacity

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity

    # Now test the flow in which metadata capacity is corrupted.
    # Corrupt the metadata capacity manually.
    md = vol.getMetadata()
    md.capacity = vol_capacity // 2
    vol.setMetadata(md)

    # During preparation of the volume, metadata capacity should be fixed.
    vol.prepare()

    actual = vol.getInfo()
    assert int(actual["capacity"]) == vol_capacity
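
Note the two size conventions mixed across these tests: older createVolume() calls pass `size` in 512-byte blocks, while newer ones pass `capacity` in bytes. The conversion used throughout:

vol_capacity = 2 * constants.GIB                  # bytes
vol_size = vol_capacity // sc.BLOCK_SIZE_512      # 512-byte blocks
assert vol_size * sc.BLOCK_SIZE_512 == vol_capacity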
Example No. 19
def test_convert_to_v5_block(tmpdir, tmp_repo, tmp_storage, tmp_db,
                             fake_rescan, fake_task, fake_sanlock,
                             src_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="domain",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=src_version,
                                            storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # Create domain directory structure.
    dom.refresh()

    # Only attached domains are converted.
    dom.attach(tmp_repo.pool_id)

    # Create some volumes in v4 format.
    for i in range(3):
        dom.createVolume(desc="Awesome volume %d" % i,
                         diskType="DATA",
                         imgUUID=str(uuid.uuid4()),
                         preallocate=sc.SPARSE_VOL,
                         capacity=10 * GiB,
                         srcImgUUID=sc.BLANK_UUID,
                         srcVolUUID=sc.BLANK_UUID,
                         volFormat=sc.COW_FORMAT,
                         volUUID=str(uuid.uuid4()))

    # Record domain and volumes metadata before conversion.
    old_dom_md = dom.getMetadata()
    volumes_md = {vol.volUUID: vol.getMetadata() for vol in dom.iter_volumes()}

    # Simulate a partly-deleted volume with cleared metadata. Such volumes
    # could be created by vdsm < 4.20.34-1.

    img_id = str(uuid.uuid4())
    vol_id = str(uuid.uuid4())

    dom.createVolume(desc="Half deleted volume",
                     diskType="DATA",
                     imgUUID=img_id,
                     preallocate=sc.SPARSE_VOL,
                     capacity=10 * GiB,
                     srcImgUUID=sc.BLANK_UUID,
                     srcVolUUID=sc.BLANK_UUID,
                     volFormat=sc.COW_FORMAT,
                     volUUID=vol_id)

    partly_deleted_vol = dom.produceVolume(img_id, vol_id)
    slot = partly_deleted_vol.getMetadataId()[1]
    dom.manifest.write_metadata_block(slot, CLEARED_VOLUME_METADATA)

    # Simulate a volume with invalid metadata to make sure such a volume
    # will not break conversion.

    img_id = str(uuid.uuid4())
    vol_id = str(uuid.uuid4())

    dom.createVolume(desc="Volume with invalid metadata",
                     diskType="DATA",
                     imgUUID=img_id,
                     preallocate=sc.SPARSE_VOL,
                     capacity=10 * GiB,
                     srcImgUUID=sc.BLANK_UUID,
                     srcVolUUID=sc.BLANK_UUID,
                     volFormat=sc.COW_FORMAT,
                     volUUID=vol_id)

    invalid_md_vol = dom.produceVolume(img_id, vol_id)
    slot = invalid_md_vol.getMetadataId()[1]
    dom.manifest.write_metadata_block(slot, INVALID_VOLUME_METADATA)

    # These volumes will not be converted to V5 format.
    skip_volumes = {partly_deleted_vol.volUUID, invalid_md_vol.volUUID}

    # Convert the domain.

    fc = formatconverter.DefaultFormatConverter()

    fc.convert(repoPath=tmp_repo.path,
               hostId=1,
               imageRepo=dom,
               isMsd=False,
               targetFormat='5')

    # Verify changes in domain metadata.

    new_dom_md = dom.getMetadata()

    # Keys modified in v5.
    assert old_dom_md.pop("VERSION") == src_version
    assert new_dom_md.pop("VERSION") == 5

    # Keys added in V5.
    assert new_dom_md.pop("BLOCK_SIZE") == sc.BLOCK_SIZE_512
    assert new_dom_md.pop("ALIGNMENT") == sc.ALIGNMENT_1M

    # Keys removed in v5.
    assert old_dom_md.pop("LOGBLKSIZE") == sc.BLOCK_SIZE_512
    assert old_dom_md.pop("PHYBLKSIZE") == sc.BLOCK_SIZE_512

    # Rest of the keys must not be modified by conversion.
    assert old_dom_md == new_dom_md

    # Verify that xleases volume is created when upgrading from version < 4.
    xleases_vol = lvm.getLV(sd_uuid, sd.XLEASES)
    assert int(xleases_vol.size) == sd.XLEASES_SLOTS * dom.alignment

    with pytest.raises(se.NoSuchLease):
        dom.manifest.lease_info("no-such-lease")

    # Verify that volume metadata was converted to v5 format.

    for vol in dom.iter_volumes():
        if vol.volUUID in skip_volumes:
            continue
        vol_md = volumes_md[vol.volUUID]
        _, slot = vol.getMetadataId()
        data = dom.manifest.read_metadata_block(slot)
        data = data.rstrip(b"\0")
        assert data == vol_md.storage_format(5)

    # Verify that invalid metadata was copied to v5 area.

    slot = partly_deleted_vol.getMetadataId()[1]
    assert dom.manifest.read_metadata_block(slot) == CLEARED_VOLUME_METADATA

    slot = invalid_md_vol.getMetadataId()[1]
    assert dom.manifest.read_metadata_block(slot) == INVALID_VOLUME_METADATA

    # Check that v4 metadata area is zeroed.

    meta_path = dom.manifest.metadata_volume_path()
    offset = blockSD.METADATA_BASE_V4
    size = blockSD.METADATA_BASE_V5 - blockSD.METADATA_BASE_V4
    data = misc.readblock(meta_path, offset, size)
    assert data == b"\0" * size
Example No. 20
def test_volume_life_cycle(monkeypatch, tmp_storage, tmp_repo, fake_access,
                           fake_rescan, tmp_db, fake_task, fake_sanlock):
    # as creation of block storage domain and volume is quite time consuming,
    # we test several volume operations in one test to speed up the test suite

    sd_uuid = str(uuid.uuid4())
    domain_name = "domain"
    domain_version = 4

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName=domain_name,
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())
    vol_capacity = 10 * 1024**3
    vol_size = vol_capacity // sc.BLOCK_SIZE_512
    vol_desc = "Test volume"

    # Create domain directory structure.
    dom.refresh()
    # Attach repo pool - SD expects at least one pool to be attached.
    dom.attach(tmp_repo.pool_id)

    with monkeypatch.context() as mc:
        mc.setattr(time, "time", lambda: 1550522547)
        dom.createVolume(imgUUID=img_uuid,
                         size=vol_size,
                         volFormat=sc.COW_FORMAT,
                         preallocate=sc.SPARSE_VOL,
                         diskType=sc.DATA_DISKTYPE,
                         volUUID=vol_uuid,
                         desc=vol_desc,
                         srcImgUUID=sc.BLANK_UUID,
                         srcVolUUID=sc.BLANK_UUID)

    # test create volume
    vol = dom.produceVolume(img_uuid, vol_uuid)
    actual = vol.getInfo()

    expected_lease = {
        "offset":
        ((blockSD.RESERVED_LEASES + 4) * sc.BLOCK_SIZE_512 * sd.LEASE_BLOCKS),
        "owners": [],
        "path":
        "/dev/{}/leases".format(sd_uuid),
        "version":
        None,
    }

    assert int(actual["capacity"]) == vol_capacity
    assert int(actual["ctime"]) == 1550522547
    assert actual["description"] == vol_desc
    assert actual["disktype"] == "DATA"
    assert actual["domain"] == sd_uuid
    assert actual["format"] == "COW"
    assert actual["lease"] == expected_lease
    assert actual["parent"] == sc.BLANK_UUID
    assert actual["status"] == "OK"
    assert actual["type"] == "SPARSE"
    assert actual["voltype"] == "LEAF"
    assert actual["uuid"] == vol_uuid

    vol_path = vol.getVolumePath()

    # Keep the slot before deleting the volume.
    _, slot = vol.getMetadataId()

    # test volume prepare
    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    vol.prepare()

    assert os.path.exists(vol_path)

    # verify we can really write and read to an image
    qemuio.write_pattern(vol_path, "qcow2")
    qemuio.verify_pattern(vol_path, "qcow2")

    # test volume teardown
    vol.teardown(sd_uuid, vol_uuid)

    assert os.path.islink(vol_path)
    assert not os.path.exists(vol_path)

    # test also deleting of the volume
    vol.delete(postZero=False, force=False, discard=False)

    # verify the lv backing the volume is deleted
    assert not os.path.islink(vol.getVolumePath())
    with pytest.raises(se.LogicalVolumeDoesNotExistError):
        lvm.getLV(sd_uuid, vol_uuid)
Example No. 21
def test_convert_from_v4_to_v5_block(tmpdir, tmp_repo, tmp_storage, tmp_db,
                                     fake_rescan, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="domain",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=4,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    # Create domain directory structure.
    dom.refresh()

    # Only attached domains are converted.
    dom.attach(tmp_repo.pool_id)

    # Create some volumes in v4 format.
    for i in range(3):
        dom.createVolume(desc="Awesome volume %d" % i,
                         diskType="DATA",
                         imgUUID=str(uuid.uuid4()),
                         preallocate=sc.SPARSE_VOL,
                         size=10 * 1024**3,
                         srcImgUUID=sc.BLANK_UUID,
                         srcVolUUID=sc.BLANK_UUID,
                         volFormat=sc.COW_FORMAT,
                         volUUID=str(uuid.uuid4()))

    # Record domain and volumes metadata before conversion.
    old_dom_md = dom.getMetadata()
    volumes_md = {vol.volUUID: vol.getMetadata() for vol in dom.iter_volumes()}

    fc = formatconverter.DefaultFormatConverter()

    fc.convert(repoPath=tmp_repo.path,
               hostId=1,
               imageRepo=dom,
               isMsd=False,
               targetFormat='5')

    # Verify changes in domain metadata.

    new_dom_md = dom.getMetadata()

    # Keys modified in v5.
    assert old_dom_md.pop("VERSION") == 4
    assert new_dom_md.pop("VERSION") == 5

    # Keys added in v5.
    assert new_dom_md.pop("BLOCK_SIZE") == sc.BLOCK_SIZE_512
    assert new_dom_md.pop("ALIGNMENT") == sc.ALIGNMENT_1M

    # Keys removed in v5.
    assert old_dom_md.pop("LOGBLKSIZE") == sc.BLOCK_SIZE_512
    assert old_dom_md.pop("PHYBLKSIZE") == sc.BLOCK_SIZE_512

    # The rest of the keys must not be modified by the conversion.
    assert old_dom_md == new_dom_md

    # Verify that the volume metadata was converted to the v5 format.

    for vol in dom.iter_volumes():
        vol_md = volumes_md[vol.volUUID]
        _, slot = vol.getMetadataId()
        data = dom.manifest.read_metadata_block(slot)
        data = data.rstrip("\0")
        assert data == vol_md.storage_format(5)

    # Check that the v4 metadata area is zeroed.

    meta_path = dom.manifest.metadata_volume_path()
    offset = blockSD.METADATA_BASE_V4
    size = blockSD.METADATA_BASE_V5 - blockSD.METADATA_BASE_V4
    data = misc.readblock(meta_path, offset, size)
    assert data == "\0" * size
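
The zero check above reads the old metadata area straight off the metadata LV. A standalone sketch of the same verification with plain file I/O, assuming the METADATA_BASE_V4/METADATA_BASE_V5 offsets used in the test:

def area_is_zeroed(path, offset, size):
    # Read `size` bytes at `offset` (binary mode) and confirm the
    # region was wiped to zeros.
    with open(path, "rb") as f:
        f.seek(offset)
        data = f.read(size)
    return data == b"\0" * size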
Example no. 22
def test_volume_metadata(tmp_storage, tmp_repo, fake_access, fake_rescan,
                         tmp_db, fake_task, fake_sanlock):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * 1024**3)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="domain",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=4,
                                            storageType=sd.ISCSI_DOMAIN,
                                            block_size=sc.BLOCK_SIZE_512,
                                            alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    vol_uuid = str(uuid.uuid4())

    dom.createVolume(desc="old description",
                     diskType="DATA",
                     imgUUID=img_uuid,
                     preallocate=sc.SPARSE_VOL,
                     size=10 * 1024**3,
                     srcImgUUID=sc.BLANK_UUID,
                     srcVolUUID=sc.BLANK_UUID,
                     volFormat=sc.COW_FORMAT,
                     volUUID=vol_uuid)

    vol = dom.produceVolume(img_uuid, vol_uuid)

    # Test metadata offset
    _, slot = vol.getMetadataId()
    offset = dom.manifest.metadata_offset(slot)
    assert offset == slot * blockSD.METADATA_SLOT_SIZE_V4

    meta_path = dom.manifest.metadata_volume_path()

    # Change metadata.
    md = vol.getMetadata()
    md.description = "new description"
    vol.setMetadata(md)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4)

    # Add additional metadata.
    md = vol.getMetadata()
    vol.setMetadata(md, CAP=md.capacity)
    with open(meta_path) as f:
        f.seek(offset)
        data = f.read(sc.METADATA_SIZE)
    data = data.rstrip("\0")
    assert data == md.storage_format(4, CAP=md.capacity)
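
The offset assertion in this test encodes the v4 slot layout: every volume owns a fixed-size metadata slot on the metadata LV, addressed by its slot index. A minimal sketch, assuming 512-byte v4 slots (the actual value is blockSD.METADATA_SLOT_SIZE_V4):

SLOT_SIZE_V4 = 512  # assumed slot size, for illustration only

def metadata_offset_v4(slot):
    # Slot N starts N * SLOT_SIZE_V4 bytes into the metadata volume.
    return slot * SLOT_SIZE_V4

assert metadata_offset_v4(0) == 0
assert metadata_offset_v4(4) == 2048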
Example no. 23
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * GiB)
    dev2 = tmp_storage.create_device(10 * GiB)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    # Also test the alignment and block size properties here.
    assert dom.alignment == sc.ALIGNMENT_1M
    assert dom.block_size == sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where the metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * MiB

    # Test the domain lease.
    lease = dom.getClusterLease()
    assert lease.name == "SDM"
    assert lease.path == "/dev/{}/leases".format(dom.sdUUID)
    assert lease.offset == dom.alignment
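    # Layout note (inferred from the assert above, treat as an assumption):
    # slot 0 of the leases LV is reserved, so the cluster ("SDM") lease
    # occupies the next slot, one alignment unit into the volume.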

    resource = fake_sanlock.read_resource(
        lease.path,
        lease.offset,
        align=dom.alignment,
        sector=dom.block_size)

    assert resource == {
        "acquired": False,
        "align": dom.alignment,
        "lockspace": dom.sdUUID.encode("utf-8"),
        "resource": lease.name.encode("utf-8"),
        "sector": dom.block_size,
        "version": 0,
    }

    # Test special volumes sizes.

    for name in (sd.IDS, sd.INBOX, sd.OUTBOX, sd.METADATA):
        lv = lvm.getLV(dom.sdUUID, name)
        # This is the minimal LV size on block storage.
        assert int(lv.size) == 128 * MiB

    lv = lvm.getLV(dom.sdUUID, blockSD.MASTERLV)
    assert int(lv.size) == GiB

    lv = lvm.getLV(dom.sdUUID, sd.LEASES)
    assert int(lv.size) == sd.LEASES_SLOTS * dom.alignment

    if domain_version > 3:
        lv = lvm.getLV(dom.sdUUID, sd.XLEASES)
        assert int(lv.size) == sd.XLEASES_SLOTS * dom.alignment
Example no. 24
def test_vg_create_remove_single_device(tmp_storage, read_only):
    dev_size = 20 * GiB
    dev = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.createVG(vg_name, [dev], "initial-tag", 128)

    lvm.set_read_only(read_only)

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    assert vg.name == vg_name
    assert vg.pv_name == (dev,)
    assert vg.tags == ("initial-tag",)
    assert int(vg.extent_size) == 128 * MiB

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    clear_stats()
    pv = lvm.getPV(dev)
    check_stats(hits=0, misses=1)

    # Call getPV() again to verify we get a cache hit.
    lvm.getPV(dev)
    check_stats(hits=1, misses=1)

    lvm.set_read_only(read_only)

    assert pv.name == dev
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    lvm.set_read_only(False)

    # TODO: should work also in read-only mode.
    lvm.removeVG(vg_name)

    lvm.set_read_only(read_only)

    # We remove the VG
    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # pvs is broken with read-only mode
    # https://bugzilla.redhat.com/1809660.
    lvm.set_read_only(False)

    # But keep the PVs, not sure why.
    clear_stats()
    pv = lvm.getPV(dev)
    check_stats(hits=0, misses=1)

    assert pv.name == dev
    assert pv.vg_name == ""
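
clear_stats() and check_stats() used throughout these tests are small helpers around the lvm module's cache counters; they are not shown in this listing. A hypothetical sketch of their shape, assuming a counters API that exposes hits and misses (an assumption, not necessarily the real interface):

def clear_stats():
    # Reset the cache counters before the call under test (assumed API).
    lvm.clear_stats()

def check_stats(hits, misses):
    # Assumed to return a mapping with "hits" and "misses" counters.
    stats = lvm.cache_stats()
    assert stats["hits"] == hits
    assert stats["misses"] == misses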
Example no. 25
def test_create_with_bitmaps(tmp_storage, tmp_repo, fake_access, fake_rescan,
                             tmp_db, fake_task, fake_sanlock, domain_version):
    sd_uuid = str(uuid.uuid4())

    dev = tmp_storage.create_device(20 * GiB)
    lvm.createVG(sd_uuid, [dev], blockSD.STORAGE_UNREADY_DOMAIN_TAG, 128)
    vg = lvm.getVG(sd_uuid)

    dom = blockSD.BlockStorageDomain.create(sdUUID=sd_uuid,
                                            domainName="domain",
                                            domClass=sd.DATA_DOMAIN,
                                            vgUUID=vg.uuid,
                                            version=domain_version,
                                            storageType=sd.ISCSI_DOMAIN)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    dom.refresh()
    dom.attach(tmp_repo.pool_id)

    img_uuid = str(uuid.uuid4())
    base_vol_uuid = str(uuid.uuid4())
    base_vol_capacity = GiB
    top_vol_uuid = str(uuid.uuid4())
    vol_capacity = 2 * base_vol_capacity
    bitmap_names = ['bitmap1', 'bitmap2']

    # Create base volume.
    dom.createVolume(imgUUID=img_uuid,
                     capacity=base_vol_capacity,
                     volFormat=sc.COW_FORMAT,
                     preallocate=sc.SPARSE_VOL,
                     diskType='DATA',
                     volUUID=base_vol_uuid,
                     desc="Test base volume",
                     srcImgUUID=sc.BLANK_UUID,
                     srcVolUUID=sc.BLANK_UUID)

    base_vol = dom.produceVolume(img_uuid, base_vol_uuid)
    base_vol_path = base_vol.getVolumePath()

    # Prepare the volume in order to create bitmaps
    base_vol.prepare()
    # Add new bitmaps to base volume
    for bitmap_name in bitmap_names:
        op = qemuimg.bitmap_add(
            base_vol_path,
            bitmap_name,
        )
        op.run()

    # Tear down the volume to verify that prepare() is called during
    # the snapshot creation.
    base_vol.teardown(sd_uuid, base_vol_uuid)

    # Create top volume with bitmaps.
    dom.createVolume(imgUUID=img_uuid,
                     capacity=vol_capacity,
                     volFormat=sc.COW_FORMAT,
                     preallocate=sc.SPARSE_VOL,
                     diskType='DATA',
                     volUUID=top_vol_uuid,
                     desc="Test top volume",
                     srcImgUUID=base_vol.imgUUID,
                     srcVolUUID=base_vol.volUUID,
                     add_bitmaps=True)

    top_vol = dom.produceVolume(img_uuid, top_vol_uuid)
    top_vol_path = top_vol.getVolumePath()

    # Prepare the volume in order to read the bitmap info.
    top_vol.prepare()

    info = qemuimg.info(top_vol_path)

    # Tear down the top volume.
    top_vol.teardown(sd_uuid, top_vol_uuid)

    assert info['format-specific']['data']['bitmaps'] == [
        {
            "flags": ["auto"],
            "name": bitmap_names[0],
            "granularity": 65536
        },
        {
            "flags": ["auto"],
            "name": bitmap_names[1],
            "granularity": 65536
        },
    ]
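
qemuimg.bitmap_add() above wraps qemu-img's bitmap subcommand (available since qemu-img 5.0). For reference, a minimal standalone sketch doing the same via subprocess (the image path in the usage comment is a placeholder):

import subprocess

def add_bitmap(image_path, bitmap_name):
    # Create a persistent dirty bitmap in a qcow2 image; new bitmaps get
    # the "auto" flag (recording enabled) by default, matching the flags
    # asserted above.
    subprocess.run(
        ["qemu-img", "bitmap", "--add", image_path, bitmap_name],
        check=True)

# Usage: add_bitmap("disk.qcow2", "bitmap1")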
Example no. 26
def test_vg_extend_reduce(tmp_storage):
    dev_size = 10 * GiB
    dev1 = tmp_storage.create_device(dev_size)
    dev2 = tmp_storage.create_device(dev_size)
    dev3 = tmp_storage.create_device(dev_size)
    vg_name = str(uuid.uuid4())

    lvm.set_read_only(False)

    lvm.createVG(vg_name, [dev1], "initial-tag", 128)

    clear_stats()
    vg = lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)

    # Calling getVG() again gets a cache hit.
    lvm.getVG(vg_name)
    check_stats(hits=1, misses=1)

    assert vg.pv_name == (dev1,)

    lvm.extendVG(vg_name, [dev2, dev3], force=False)

    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after extendVG() does not use the cache.
    # This happens because extendVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev2, dev3))

    clear_stats()
    # The first pv (metadata pv) will have the 2 used metadata areas.
    pv = lvm.getPV(dev1)
    check_stats(hits=0, misses=1)

    assert pv.name == dev1
    assert pv.vg_name == vg_name
    assert int(pv.dev_size) == dev_size
    assert int(pv.mda_count) == 2
    assert int(pv.mda_used_count) == 2

    # The rest of the pvs will have 2 unused metadata areas.
    for dev in dev2, dev3:
        clear_stats()
        pv = lvm.getPV(dev)
        check_stats(hits=0, misses=1)

        assert pv.name == dev
        assert pv.vg_name == vg_name
        assert int(pv.dev_size) == dev_size
        assert int(pv.mda_count) == 2
        assert int(pv.mda_used_count) == 0

    lvm.reduceVG(vg_name, dev2)
    clear_stats()
    vg = lvm.getVG(vg_name)
    # Calling getVG() after reduceVG() does not use the cache.
    # This happens because reduceVG() invalidates the VG.
    check_stats(hits=0, misses=1)

    assert sorted(vg.pv_name) == sorted((dev1, dev3))

    lvm.removeVG(vg_name)

    clear_stats()
    with pytest.raises(se.VolumeGroupDoesNotExist):
        lvm.getVG(vg_name)
    check_stats(hits=0, misses=1)
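
The hit/miss pattern in this test follows from a plain invalidate-on-write cache. A dict-backed sketch of that idea (illustration only, not the actual vdsm implementation):

class VGCache:
    def __init__(self, lookup):
        self._lookup = lookup  # the real "run the lvm command" function
        self._cache = {}

    def get(self, name):
        if name not in self._cache:  # miss: query lvm and remember
            self._cache[name] = self._lookup(name)
        return self._cache[name]

    def invalidate(self, name):
        # extendVG() and reduceVG() invalidate, so the next get() misses.
        self._cache.pop(name, None)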
Example no. 27
def test_create_domain_metadata(tmp_storage, tmp_repo, fake_sanlock,
                                domain_version):
    sd_uuid = str(uuid.uuid4())
    domain_name = "loop-domain"

    dev1 = tmp_storage.create_device(10 * 1024**3)
    dev2 = tmp_storage.create_device(10 * 1024**3)
    lvm.createVG(sd_uuid, [dev1, dev2], blockSD.STORAGE_UNREADY_DOMAIN_TAG,
                 128)
    vg = lvm.getVG(sd_uuid)
    pv1 = lvm.getPV(dev1)
    pv2 = lvm.getPV(dev2)

    dom = blockSD.BlockStorageDomain.create(
        sdUUID=sd_uuid,
        domainName=domain_name,
        domClass=sd.DATA_DOMAIN,
        vgUUID=vg.uuid,
        version=domain_version,
        storageType=sd.ISCSI_DOMAIN,
        block_size=sc.BLOCK_SIZE_512,
        alignment=sc.ALIGNMENT_1M)

    sdCache.knownSDs[sd_uuid] = blockSD.findDomain
    sdCache.manuallyAddDomain(dom)

    lease = sd.DEFAULT_LEASE_PARAMS
    expected = {
        # Common storage domain values.
        sd.DMDK_CLASS: sd.DATA_DOMAIN,
        sd.DMDK_DESCRIPTION: domain_name,
        sd.DMDK_IO_OP_TIMEOUT_SEC: lease[sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: lease[sd.DMDK_LEASE_RETRIES],
        sd.DMDK_LEASE_TIME_SEC: lease[sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_LOCK_POLICY: "",
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
            lease[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_POOLS: [],
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_SDUUID: sd_uuid,
        sd.DMDK_TYPE: sd.ISCSI_DOMAIN,
        sd.DMDK_VERSION: domain_version,

        # Block storage domain extra values.
        blockSD.DMDK_VGUUID: vg.uuid,

        # PV keys for blockSD.DMDK_PV_REGEX.
        "PV0": {
            'guid': os.path.basename(dev1),
            'mapoffset': '0',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv1.uuid,
        },
        "PV1": {
            'guid': os.path.basename(dev2),
            'mapoffset': '77',
            'pecount': '77',
            'pestart': '0',
            'uuid': pv2.uuid,
        },
    }

    # In version 5 we removed LOGBLKSIZE and PHYBLKSIZE and added
    # ALIGNMENT and BLOCK_SIZE.
    if domain_version < 5:
        expected[sd.DMDK_LOGBLKSIZE] = sc.BLOCK_SIZE_512
        expected[sd.DMDK_PHYBLKSIZE] = sc.BLOCK_SIZE_512
    else:
        expected[sd.DMDK_ALIGNMENT] = sc.ALIGNMENT_1M
        expected[sd.DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512

    actual = dom.getMetadata()

    assert expected == actual

    # Check that the first PV is the device where the metadata is stored.
    assert dev1 == lvm.getVgMetadataPv(dom.sdUUID)

    lv = lvm.getLV(dom.sdUUID, sd.METADATA)
    assert int(lv.size) == blockSD.METADATA_LV_SIZE_MB * constants.MEGAB