def test_lvm_logical_volume_segtype_init(self):
    """An unknown segment type is rejected; a valid one is stored on the LV."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    # "raid8" is not a supported segment type -> constructor must refuse it
    with self.assertRaises(ValueError):
        LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                               fmt=get_fmt("xfs"), exists=False,
                               seg_type="raid8", pvs=[pv_a, pv_b])

    # "striped" is valid and should be reported back unchanged
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=get_fmt("xfs"), exists=False,
                                seg_type="striped", pvs=[pv_a, pv_b])
    self.assertEqual(lv.seg_type, "striped")
def test_lvmcached_two_logical_volume_init(self):
    """Two cached LVs in one VG share a single pmspare LV.

    Only the first cache pays for the pmspare space; the second cache
    reuses it and thus ends up 8 MiB bigger.
    """
    get_fmt = blivet.formats.get_format
    slow_pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    fast_pv = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[slow_pv, fast_pv])

    lv1 = LVMLogicalVolumeDevice(
        "testlv", parents=[vg], fmt=get_fmt("xfs"), exists=False,
        cache_request=LVMCacheRequest(Size("256 MiB"), [fast_pv], "writethrough"))
    lv2 = LVMLogicalVolumeDevice(
        "testlv", parents=[vg], fmt=get_fmt("xfs"), exists=False,
        cache_request=LVMCacheRequest(Size("256 MiB"), [fast_pv], "writethrough"))

    cache = lv1.cache
    self.assertIsNotNone(cache)
    # 256 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
    self.assertEqual(cache.size, Size("240 MiB"))

    cache = lv2.cache
    self.assertIsNotNone(cache)
    # already have pmspare space reserved for lv1's cache (and shared)
    # 256 MiB - 8 MiB (metadata) [no pmspare]
    self.assertEqual(cache.size, Size("248 MiB"))
def testLVMSnapShotDeviceInit(self):
    """Constructor validation and basic properties of LVMSnapShotDevice."""
    pv = StorageDevice("pv1", fmt=blivet.formats.getFormat("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    base_lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                     fmt=blivet.formats.getFormat("xfs"))

    # each invalid construction together with the error it must produce
    for extra_kwargs, message in (
            (dict(), "lvm snapshot devices require an origin lv"),
            (dict(origin=base_lv), "lvm snapshot origin volume must already exist"),
            (dict(origin=pv), "lvm snapshot origin must be a logical volume"),
            (dict(vorigin=True), "only existing vorigin snapshots are supported")):
        with self.assertRaisesRegex(ValueError, message):
            LVMSnapShotDevice("snap1", parents=[vg], **extra_kwargs)

    # with an existing origin the constructor succeeds
    base_lv.exists = True
    snap1 = LVMSnapShotDevice("snap1", parents=[vg], origin=base_lv)

    # the snapshot's format type tracks the origin's format type
    self.assertEqual(snap1.format.type, base_lv.format.type)
    base_lv.format = blivet.formats.getFormat("DM_snapshot_cow", exists=True)
    self.assertEqual(snap1.format.type, base_lv.format.type)

    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(base_lv.isleaf, False)
    self.assertEqual(base_lv.direct, True)

    # dependency is one-way: snapshot -> origin
    self.assertEqual(snap1.dependsOn(base_lv), True)
    self.assertEqual(base_lv.dependsOn(snap1), False)
def test_lvmsnap_shot_device_init(self):
    """Snapshot creation via LVMLogicalVolumeDevice(origin=...) validation."""
    get_fmt = blivet.formats.get_format
    pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    origin_lv = LVMLogicalVolumeDevice("testlv", parents=[vg], fmt=get_fmt("xfs"))

    # a PV is not an acceptable snapshot origin
    with six.assertRaisesRegex(self, errors.DeviceError,
                               "lvm snapshot origin must be a logical volume"):
        LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)

    # vorigin snapshots can only be represented for existing devices
    with six.assertRaisesRegex(self, errors.DeviceError,
                               "only existing vorigin snapshots are supported"):
        LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)

    origin_lv.exists = True
    snap1 = LVMLogicalVolumeDevice("snap1", parents=[vg], origin=origin_lv)

    # snapshot format type follows the origin's format type
    self.assertEqual(snap1.format.type, origin_lv.format.type)
    origin_lv.format = get_fmt("DM_snapshot_cow", exists=True)
    self.assertEqual(snap1.format.type, origin_lv.format.type)

    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(origin_lv.isleaf, False)
    self.assertEqual(origin_lv.direct, True)

    # non-existing snapshot depends on its origin, not vice versa
    self.assertEqual(snap1.depends_on(origin_lv), True)
    self.assertEqual(origin_lv.depends_on(snap1), False)
def test_new_lv_from_lvs(self):
    """Combine two existing LVs into a thin pool with new_lv_from_lvs.

    The two source LVs must become internal (data/metadata) LVs of the
    new pool and disappear from the devicetree as standalone devices;
    creating the pool must go through lvm.thpool_convert.
    """
    b = blivet.Blivet()
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
    lv1 = LVMLogicalVolumeDevice("data_lv", parents=[vg],
                                 size=Size("500 MiB"), exists=True)
    lv2 = LVMLogicalVolumeDevice("metadata_lv", parents=[vg],
                                 size=Size("50 MiB"), exists=True)

    for dev in (pv, vg, lv1, lv2):
        b.devicetree._add_device(dev)

    # check that all the above devices are in the expected places
    self.assertEqual(set(b.devices), {pv, vg, lv1, lv2})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {lv1, lv2})
    self.assertEqual(set(b.vgs[0].lvs), {lv1, lv2})

    self.assertEqual(vg.size, Size("1020 MiB"))
    self.assertEqual(lv1.size, Size("500 MiB"))
    self.assertEqual(lv2.size, Size("50 MiB"))

    # combine the two LVs into a thin pool (the LVs should become its internal LVs)
    pool = b.new_lv_from_lvs(vg, name="pool", seg_type="thin-pool",
                             from_lvs=(lv1, lv2))

    # add the pool LV into the devicetree
    b.devicetree._add_device(pool)

    # the pool replaces lv1/lv2 as the only visible LV
    self.assertEqual(set(b.devices), {pv, vg, pool})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs[0]._internal_lvs), {lv1, lv2})

    # lv1 became the pool's data LV, lv2 its metadata LV; sizes unchanged
    self.assertTrue(lv1.is_internal_lv)
    self.assertEqual(lv1.int_lv_type, LVMInternalLVtype.data)
    self.assertEqual(lv1.size, Size("500 MiB"))
    self.assertTrue(lv2.is_internal_lv)
    self.assertEqual(lv2.int_lv_type, LVMInternalLVtype.meta)
    self.assertEqual(lv2.size, Size("50 MiB"))

    self.assertEqual(pool.name, "testvg-pool")
    self.assertEqual(pool.size, Size("500 MiB"))
    self.assertEqual(pool.metadata_size, Size("50 MiB"))
    self.assertIs(pool.vg, vg)

    # creating the pool from existing LVs must use thpool_convert
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(pool, "_pre_create"):
            pool.create()
            self.assertTrue(lvm.thpool_convert.called)
def testLogicalVolume(self):
    """LV name validation accepts sane names and rejects reserved/illegal ones."""
    for name in ('lv00', 'volume-name', 'volumename-'):
        self.assertTrue(LVMLogicalVolumeDevice.isNameValid(name))
    # leading hyphen, non-ASCII, whitespace and LVM-reserved substrings
    for name in ('-leading-hyphen', 'únicode', 'sp aces',
                 'snapshot47', 'pvmove0', 'sub_tmetastring'):
        self.assertFalse(LVMLogicalVolumeDevice.isNameValid(name))
def testLogicalVolume(self):
    """Check isNameValid against known-good and known-bad LV names."""
    valid = ['lv00', 'volume-name', 'volumename-']
    # leading hyphen, non-ASCII, spaces, and LVM-reserved name fragments
    invalid = ['-leading-hyphen', 'únicode', 'sp aces',
               'snapshot47', 'pvmove0', 'sub_tmetastring']

    for candidate in valid:
        self.assertTrue(LVMLogicalVolumeDevice.isNameValid(candidate))
    for candidate in invalid:
        self.assertFalse(LVMLogicalVolumeDevice.isNameValid(candidate))
def _createThinPool(poolName, vg, alignment=0, poolMetaDataSize=0, poolDataSize=0):
    """Create a thin pool LV named *poolName* in VG *vg* and return it.

    :param poolName: name for the new thin pool LV
    :param vg: blivet VG device to create the pool in
    :param alignment: chunk size in KiB; 0 means "let blivet size the pool"
    :param poolMetaDataSize: metadata LV size in KiB (only used with alignment)
    :param poolDataSize: data LV size in KiB (only used with alignment)
    :raises: GlusterHostStorageDeviceLVConvertFailedException,
             GlusterHostStorageDeviceLVChangeFailedException
    """
    if not alignment:
        # bz#1180228: blivet doesn't handle percentage-based sizes properly
        # Workaround: Till the bz gets fixed, we take only 99% size from vg
        pool = LVMThinPoolDevice(poolName, parents=[vg],
                                 size=(vg.size * 99 / 100),
                                 grow=True)
        blivetEnv.createDevice(pool)
        return pool
    else:
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(
            metaName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(
            poolName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        rc, out, err = utils.execCmd([_lvconvertCommandPath.cmd,
                                      '--chunksize', '%sK' % alignment,
                                      '--thinpool', vgPoolName,
                                      '--poolmetadata',
                                      "%s/%s" % (vg.name, metaName),
                                      # fixed misspelled option name
                                      # ('--poolmetadataspar'); only worked
                                      # via LVM's prefix abbreviation
                                      '--poolmetadataspare', 'n',
                                      '-y'])
        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)

        # disable zeroing of the pool's provisioned blocks
        rc, out, err = utils.execCmd([_lvchangeCommandPath.cmd,
                                      '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)

        # re-scan so the converted pool is picked up by blivet
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)
def test_vgchunk_with_cache_pvfree(self): pv = StorageDevice("pv1", size=Size("40 GiB"), fmt=get_format("lvmpv")) # 1069 MiB so that the PV provides 1068 MiB of free space (see # LVMVolumeGroupDevice.extents) which is 44 MiB more than the caches # need and which should thus be split into the LVs pv2 = StorageDevice("pv2", size=Size("1069 MiB"), fmt=get_format("lvmpv")) vg = LVMVolumeGroupDevice("vg", parents=[pv, pv2]) cache_req1 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough") lv1 = LVMLogicalVolumeDevice("lv1", parents=[vg], size=Size("1 GiB"), grow=True, cache_request=cache_req1) cache_req2 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough") lv2 = LVMLogicalVolumeDevice("lv2", parents=[vg], size=Size("10 GiB"), grow=True, cache_request=cache_req2) lv3 = LVMLogicalVolumeDevice("lv3", parents=[vg], size=Size("10 GiB"), grow=True, maxsize=Size("12 GiB")) req1 = LVRequest(lv1) req2 = LVRequest(lv2) req3 = LVRequest(lv3) chunk = VGChunk(vg, requests=[req1, req2, req3]) chunk.grow_requests() # the chunk is done growing since its pool has been exhausted self.assertEqual(chunk.done, True) # there are still two requests remaining since lv1 and lv2 have no max self.assertEqual(chunk.remaining, 2) # All the sizes should be the same as without the caches (see the # test_vgchunk test for their "rationales") because the space for the # caches should just be reserved. # The extra 11 extents available on the pv2 should go in the 1:10 ratio # to req1 and req2. self.assertEqual(req1.growth, 395 + 1) self.assertEqual(req2.growth, 3956 + 10) self.assertEqual(req3.growth, 512)
def test_logical_volume(self):
    """is_name_valid accepts good LV names and rejects bad/reserved ones."""
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                fmt=blivet.formats.get_format("xfs"))

    for candidate in ('lv00', 'volume-name', 'volumename-'):
        self.assertTrue(lv.is_name_valid(candidate))
    # leading hyphen, non-ASCII, whitespace, LVM-reserved substrings
    for candidate in ('-leading-hyphen', 'únicode', 'sp aces',
                      'snapshot47', 'pvmove0', 'sub_tmetastring'):
        self.assertFalse(lv.is_name_valid(candidate))
def get_snapshot_device(request, devicetree):
    """Get the ThinLV snapshot device.

    :param request: a snapshot request
    :param devicetree: a device tree to look up devices
    :return: a model of the ThinLV snapshot
    :raises StorageError: if the origin is missing/invalid or the
        snapshot already exists
    """
    # device-mapper doubles hyphens in LV names and joins vg/lv with '-'
    snap_name = request.name.replace('-', '--')
    origin_name = request.origin.replace('-', '--').replace('/', '-')

    origin_dev = devicetree.get_device_by_name(origin_name)
    log.debug("Snapshot: name %s has origin %s", request.name, origin_dev)

    if origin_dev is None:
        raise StorageError(
            _("Snapshot: origin \"{}\" doesn't exist!").format(request.origin))

    if not origin_dev.is_thin_lv:
        raise StorageError(
            _("Snapshot: origin \"{}\" of snapshot \"{}\" is not a valid "
              "thin LV device.").format(request.origin, request.name))

    existing = devicetree.get_device_by_name(
        "%s-%s" % (origin_dev.vg.name, snap_name))
    if existing:
        raise StorageError(
            _("Snapshot {} already exists.").format(request.name))

    try:
        return LVMLogicalVolumeDevice(name=request.name,
                                      parents=[origin_dev.pool],
                                      seg_type="thin",
                                      origin=origin_dev)
    except ValueError as e:
        raise StorageError(str(e)) from e
def setUp(self):
    """Build one plain LV (self.lv) and one cached LV (self.cached_lv)."""
    get_fmt = blivet.formats.get_format

    # plain LV on a single-PV VG
    plain_pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    plain_vg = LVMVolumeGroupDevice("testvg", parents=[plain_pv])
    self.lv = LVMLogicalVolumeDevice("testlv", parents=[plain_vg],
                                     fmt=get_fmt("xfs"))

    # cached LV on a two-PV VG, cache restricted to the second PV
    pv_a = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    pv_b = StorageDevice("pv3", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    cached_vg = LVMVolumeGroupDevice("testvg2", parents=[pv_a, pv_b])
    cache_req = LVMCacheRequest(Size("512 MiB"), [pv_b], "writethrough")
    self.cached_lv = LVMLogicalVolumeDevice("testcachedlv", parents=[cached_vg],
                                            fmt=get_fmt("xfs"),
                                            exists=False,
                                            cache_request=cache_req)
def test_skip_activate(self):
    """ignore_skip_activation counter controls the ignore_skip flag of lvactivate.

    The counter behaves like a refcount: any value > 0 means the
    activation-skip flag is ignored.
    """
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
    lv = LVMLogicalVolumeDevice("data_lv", parents=[vg],
                                size=Size("500 MiB"), exists=True)

    def setup_and_check(expected_ignore_skip):
        # activate the LV with lvm mocked out and verify the arguments
        # passed to lvactivate
        with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
            with patch.object(lv, "_pre_setup"):
                lv.setup()
            # NOTE: the original used Mock.called_with(), which is not an
            # assertion (it always evaluates truthy); assert_called_with
            # actually verifies the call.
            lvm.lvactivate.assert_called_with(vg.name, lv.lvname,
                                              ignore_skip=expected_ignore_skip)

    setup_and_check(False)

    lv.ignore_skip_activation += 1
    setup_and_check(True)

    lv.ignore_skip_activation += 1
    setup_and_check(True)

    lv.ignore_skip_activation -= 2
    setup_and_check(False)
def test_lvmthin_snap_shot_device_init(self):
    """Thin snapshot construction rules and dependency semantics."""
    get_fmt = blivet.formats.get_format
    pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"),
                                  seg_type="thin-pool")
    thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool],
                                    size=Size("200 MiB"), seg_type="thin")

    # origin not existing yet -> refused
    with self.assertRaisesRegex(ValueError,
                                "lvm snapshot origin volume must already exist"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv,
                               seg_type="thin")

    # a PV is not a valid origin
    with self.assertRaisesRegex(ValueError,
                                "lvm snapshot origin must be a logical volume"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv,
                               seg_type="thin")

    # now make the constructor succeed so we can test some properties
    thinlv.exists = True
    snap1 = LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv,
                                   seg_type="thin")
    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(thinlv.isleaf, True)
    self.assertEqual(thinlv.direct, True)

    self.assertEqual(snap1.depends_on(thinlv), True)
    self.assertEqual(thinlv.depends_on(snap1), False)

    # existing thin snapshots do not depend on their origin
    snap1.exists = True
    self.assertEqual(snap1.depends_on(thinlv), False)
def test_target_size(self):
    """target_size setter enforces min/max bounds and updates size."""
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])

    orig_size = Size("800 MiB")
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=orig_size,
                                fmt=blivet.formats.get_format("ext4"),
                                exists=True)

    # pretend the (existing, resizable) filesystem can shrink to 200 MiB
    min_size = Size("200 MiB")
    lv.format.exists = True
    lv.format._min_instance_size = min_size
    lv.format._resizable = True

    # Make sure things are as expected to begin with.
    self.assertEqual(lv.min_size, min_size)
    self.assertEqual(lv.max_size, Size("1020 MiB"))
    self.assertEqual(lv.size, orig_size)

    # ValueError if size smaller than min_size
    with self.assertRaisesRegex(ValueError, "size.*smaller than the minimum"):
        lv.target_size = Size("1 MiB")
    # target size should be unchanged
    self.assertEqual(lv.target_size, orig_size)

    # ValueError if size larger than max_size
    with self.assertRaisesRegex(ValueError, "size.*larger than the maximum"):
        lv.target_size = Size("1 GiB")
    # target size should be unchanged
    self.assertEqual(lv.target_size, orig_size)

    # successful set of target size should also be reflected in size attr
    new_target = Size("900 MiB")
    lv.target_size = new_target
    self.assertEqual(lv.target_size, new_target)
    self.assertEqual(lv.size, new_target)

    # reset target size to original size
    lv.target_size = orig_size
    self.assertEqual(lv.target_size, orig_size)
    self.assertEqual(lv.size, orig_size)
def test_lvm_logical_volume_insuf_seg_type(self):
    """Incomplete PV specifications are rejected by the LV constructor."""
    get_fmt = blivet.formats.get_format
    pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    # pvs have to be specified for non-linear LVs
    for segment_type in ("raid1", "striped"):
        with self.assertRaises(ValueError):
            LVMLogicalVolumeDevice("testlv", parents=[vg],
                                   size=Size("512 MiB"),
                                   fmt=get_fmt("xfs"), exists=False,
                                   seg_type=segment_type)

    # no or complete specification has to be given for linear LVs
    with self.assertRaises(ValueError):
        # plain PV list is not a complete specification
        LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                               fmt=get_fmt("xfs"), exists=False,
                               pvs=[pv])

    with self.assertRaises(ValueError):
        # PV specs that don't add up to the LV size are also incomplete
        pv_spec = LVPVSpec(pv, Size("256 MiB"))
        pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
        LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                               fmt=get_fmt("xfs"), exists=False,
                               pvs=[pv_spec, pv_spec2])
def test_lvm_vdo_pool(self):
    """VDO pool/LV relationship, space accounting and destroy semantics."""
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    pool = LVMLogicalVolumeDevice("testpool", parents=[vg],
                                  size=Size("512 MiB"),
                                  seg_type="vdo-pool", exists=True)
    self.assertTrue(pool.is_vdo_pool)

    free = vg.free_space
    vdo_lv = LVMLogicalVolumeDevice("testlv", parents=[pool],
                                    size=Size("2 GiB"),
                                    seg_type="vdo", exists=True)
    self.assertTrue(vdo_lv.is_vdo_lv)
    self.assertEqual(vdo_lv.vg, vg)
    self.assertEqual(vdo_lv.pool, pool)

    # free space in the vg shouldn't be affected by the vdo lv
    self.assertEqual(vdo_lv.vg_space_used, 0)
    self.assertEqual(free, vg.free_space)

    self.assertListEqual(pool.lvs, [vdo_lv])

    # now try to destroy both the pool and the vdo lv
    # for the lv this should be a no-op, destroying the pool should destroy both
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        vdo_lv.destroy()
        vdo_lv.remove_hook()
        self.assertFalse(vdo_lv.exists)
        self.assertFalse(lvm.lvremove.called)
        self.assertListEqual(pool.lvs, [])

        pool.destroy()
        self.assertFalse(pool.exists)
        self.assertTrue(lvm.lvremove.called)
def setUp(self):
    """Build a fixture with an unsupported disklabel and an LVM stack on top."""
    disk1 = DiskDevice("testdisk", size=Size("300 GiB"), exists=True,
                       fmt=get_format("disklabel", exists=True))
    # mark the disklabel unsupported; partition creation must log about it
    disk1.format._supported = False

    with self.assertLogs("blivet", level="INFO") as cm:
        part1 = PartitionDevice("testpart1", size=Size("150 GiB"),
                                exists=True, parents=[disk1],
                                fmt=get_format("ext4", exists=True))
    self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))

    with self.assertLogs("blivet", level="INFO") as cm:
        part2 = PartitionDevice("testpart2", size=Size("100 GiB"),
                                exists=True, parents=[disk1],
                                fmt=get_format("lvmpv", exists=True))
    self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))

    # To be supported, all of a devices ancestors must be supported.
    disk2 = DiskDevice("testdisk2", size=Size("300 GiB"), exists=True,
                       fmt=get_format("lvmpv", exists=True))
    vg = LVMVolumeGroupDevice("testvg", exists=True,
                              parents=[part2, disk2])
    lv = LVMLogicalVolumeDevice("testlv", exists=True, size=Size("64 GiB"),
                                parents=[vg],
                                fmt=get_format("ext4", exists=True))

    # a sparse-file-backed disk with a fresh (non-existing) disklabel
    with sparsetmpfile("addparttest", Size("50 MiB")) as disk_file:
        disk3 = DiskFile(disk_file)
        disk3.format = get_format("disklabel", device=disk3.path, exists=False)

    self.disk1 = disk1
    self.disk2 = disk2
    self.disk3 = disk3
    self.partition1 = part1
    self.partition2 = part2
    self.vg = vg
    self.lv = lv
def testLVMSnapShotDeviceInit(self):
    """LVMSnapShotDevice validation; the snapshot shares the origin's format."""
    pv = StorageDevice("pv1", fmt=blivet.formats.getFormat("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    base_lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                     fmt=blivet.formats.getFormat("xfs"))

    # every invalid construction and the error message it must raise
    for extra_kwargs, message in (
            (dict(), "lvm snapshot devices require an origin lv"),
            (dict(origin=base_lv), "lvm snapshot origin volume must already exist"),
            (dict(origin=pv), "lvm snapshot origin must be a logical volume"),
            (dict(vorigin=True), "only existing vorigin snapshots are supported")):
        with self.assertRaisesRegex(ValueError, message):
            LVMSnapShotDevice("snap1", parents=[vg], **extra_kwargs)

    base_lv.exists = True
    snap1 = LVMSnapShotDevice("snap1", parents=[vg], origin=base_lv)

    # snapshot and origin share the very same format object
    self.assertEqual(snap1.format, base_lv.format)
    snap1.format = blivet.formats.getFormat("DM_snapshot_cow", exists=True)
    self.assertEqual(snap1.format, base_lv.format)

    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(base_lv.isleaf, False)
    self.assertEqual(base_lv.direct, True)

    # dependency is one-way: snapshot -> origin
    self.assertEqual(snap1.dependsOn(base_lv), True)
    self.assertEqual(base_lv.dependsOn(snap1), False)
def test_vg_is_empty(self):
    """A fresh VG is empty; adding an LV makes it non-empty."""
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1024 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])

    self.assertTrue(vg.is_empty)

    # creating the LV registers it with its parent VG as a side effect
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           fmt=blivet.formats.get_format("xfs"),
                           exists=False)
    self.assertFalse(vg.is_empty)
def test_device_name(self):
    """devicetree.names must track all device names, including hidden ones."""
    # check that devicetree.names property contains all device's names

    # mock lvs_info to avoid blockdev call allowing run as non-root
    with patch.object(LVsInfo, 'cache', new_callable=PropertyMock) as mock_lvs_cache:
        # pretend these LVs pre-exist on the system
        mock_lvs_cache.return_value = {"sdmock": "dummy", "testvg-testlv": "dummy"}

        tree = DeviceTree()
        dev_names = ["sda", "sdb", "sdc"]
        for dev_name in dev_names:
            dev = DiskDevice(dev_name, size=Size("1 GiB"))
            tree._add_device(dev)
            self.assertTrue(dev in tree.devices)
            self.assertTrue(dev.name in tree.names)
            dev.format = get_format("lvmpv", device=dev.path)

        # VG on the last disk added above, plus one LV in it
        vg = LVMVolumeGroupDevice("testvg", parents=[dev])
        tree._add_device(vg)
        dev_names.append(vg.name)

        lv = LVMLogicalVolumeDevice("testlv", parents=[vg])
        tree._add_device(lv)
        dev_names.append(lv.name)

        # frobnicate a bit with the hidden status of the devices:
        # * hide sda
        # * hide and unhide again sdb
        # * leave sdc unchanged
        tree.hide(tree.get_device_by_name("sda"))
        tree.hide(tree.get_device_by_name("sdb"))
        tree.unhide(tree.get_device_by_name("sdb", hidden=True))

        # some lvs names may be already present in the system (mocked)
        lv_info = list(lvs_info.cache.keys())

        # all devices should still be present in the tree.names
        self.assertEqual(set(tree.names), set(lv_info + dev_names))

        # "remove" the LV, it should no longer be in the list
        tree.actions._actions.append(Mock(device=lv,
                                          type=ACTION_TYPE_DESTROY,
                                          obj=ACTION_OBJECT_DEVICE))
        tree._remove_device(lv)
        self.assertFalse(lv.name in tree.names)
def test_lvm_logical_volume_with_pvs_init(self):
    """An LV created with explicit PV specs keeps them in _pv_specs."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    # all 1 GiB of the LV allocated from the first PV
    spec = LVPVSpec(pv_a, Size("1 GiB"))
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=get_fmt("xfs"), exists=False,
                                pvs=[spec])
    self.assertEqual([s.pv for s in lv._pv_specs], [pv_a])
def test_lvm_logical_volume_metadata_size(self):
    """RAID1 LVs keep one metadata copy per image in the VG accounting."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    raid_lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                     size=Size("512 MiB"),
                                     fmt=get_fmt("xfs"), exists=False,
                                     seg_type="raid1", pvs=[pv_a, pv_b])
    self.assertEqual(raid_lv.metadata_size, Size("4 MiB"))
    # two copies of metadata
    self.assertEqual(raid_lv.metadata_vg_space_used, Size("8 MiB"))
def test_lvm_logical_volume_pv_free_linear(self):
    """Linear LV with explicit PV specs reduces each PV's free space."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    # 256 MiB from each PV makes up the 512 MiB LV
    specs = [LVPVSpec(pv_a, Size("256 MiB")), LVPVSpec(pv_b, Size("256 MiB"))]
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                fmt=get_fmt("xfs"), exists=False,
                                pvs=specs)

    self.assertEqual(lv.seg_type, "linear")
    self.assertEqual(pv_a.format.free, Size("768 MiB"))
    self.assertEqual(pv_b.format.free, Size("256 MiB"))
def test_lvmcached_logical_volume_init(self):
    """Properties of a (non-existing) cached LV and its attached cache."""
    get_fmt = blivet.formats.get_format
    slow_pv = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1 GiB"))
    fast_pv = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[slow_pv, fast_pv])

    cache_req = LVMCacheRequest(Size("512 MiB"), [fast_pv], "writethrough")
    xfs_fmt = get_fmt("xfs")
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], fmt=xfs_fmt,
                                size=Size(xfs_fmt.min_size), exists=False,
                                cache_request=cache_req)
    self.assertEqual(lv.size, xfs_fmt.min_size)

    # check that the LV behaves like a cached LV
    self.assertTrue(lv.cached)
    cache = lv.cache
    self.assertIsNotNone(cache)

    # the cache reserves space for its metadata from the requested size, but
    # it may require (and does in this case) a pmspare LV to be allocated
    self.assertEqual(lv.vg_space_used,
                     lv.cache.size + lv.cache.md_size + lv.size)

    # check parameters reported by the (non-existing) cache
    # 512 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
    self.assertEqual(cache.size, Size("496 MiB"))
    self.assertEqual(cache.md_size, Size("8 MiB"))
    self.assertEqual(cache.vg_space_used, Size("504 MiB"))
    for reported in (cache.size, cache.md_size, cache.vg_space_used):
        self.assertIsInstance(reported, Size)

    self.assertFalse(cache.exists)
    self.assertIsNone(cache.stats)
    self.assertEqual(cache.mode, "writethrough")
    self.assertIsNone(cache.backing_device_name)
    self.assertIsNone(cache.cache_device_name)
    self.assertEqual(set(cache.fast_pvs), set([fast_pv]))
def test_lvm_logical_volume_segtype_pv_free(self):
    """A striped LV consumes space evenly across its PVs."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    striped_lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                        size=Size("1 GiB"),
                                        fmt=get_fmt("xfs"), exists=False,
                                        seg_type="striped",
                                        pvs=[pv_a, pv_b])
    self.assertEqual(striped_lv.seg_type, "striped")

    # 512 MiB taken from each PV; the smaller one is fully consumed
    self.assertEqual(pv_a.format.free, Size("512 MiB"))
    self.assertEqual(pv_b.format.free, 0)
def test_lvm_logical_volume_pv_free_cached(self):
    """PV free-space accounting for a cached linear LV."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    # LV split evenly between the PVs, cache placed on the first PV
    specs = [LVPVSpec(pv_a, Size("256 MiB")), LVPVSpec(pv_b, Size("256 MiB"))]
    cache_req = LVMCacheRequest(Size("512 MiB"), [pv_a], "writethrough")
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                fmt=get_fmt("xfs"), exists=False,
                                cache_request=cache_req,
                                pvs=specs)
    self.assertEqual(lv.seg_type, "linear")

    # 1024 MiB (free) - 256 MiB (LV part) - 504 MiB (cache shrank for pmspare space)
    self.assertEqual(pv_a.format.free, Size("264 MiB"))
    self.assertEqual(pv_b.format.free, Size("256 MiB"))
def generate_device_factory_request_lvm_test(self, blockdev):
    """A raid1 LV maps to the expected DeviceFactoryRequest structure."""
    pv_a = StorageDevice("pv1", size=Size("1025 MiB"), fmt=get_format("lvmpv"))
    pv_b = StorageDevice("pv2", size=Size("513 MiB"), fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])
    lv = LVMLogicalVolumeDevice("testlv", size=Size("512 MiB"), parents=[vg],
                                fmt=get_format("xfs"), exists=False,
                                seg_type="raid1", pvs=[pv_a, pv_b])

    request = utils.generate_device_factory_request(self.storage, lv)

    expected = {
        "device-spec": get_variant(Str, "testvg-testlv"),
        "disks": get_variant(List[Str], []),
        "mount-point": get_variant(Str, ""),
        "reformat": get_variant(Bool, True),
        "format-type": get_variant(Str, "xfs"),
        "label": get_variant(Str, ""),
        "luks-version": get_variant(Str, ""),
        "device-type": get_variant(Int, devicefactory.DEVICE_TYPE_LVM),
        "device-name": get_variant(Str, "testlv"),
        # raid1 metadata shaves 4 MiB off the usable size
        "device-size": get_variant(UInt64, Size("508 MiB").get_bytes()),
        "device-encrypted": get_variant(Bool, False),
        "device-raid-level": get_variant(Str, ""),
        "container-spec": get_variant(Str, "testvg"),
        "container-name": get_variant(Str, "testvg"),
        "container-size-policy": get_variant(Int64, Size("1.5 GiB")),
        "container-encrypted": get_variant(Bool, False),
        "container-raid-level": get_variant(Str, ""),
    }
    self.assertEqual(DeviceFactoryRequest.to_structure(request), expected)
def get_snapshot_device(request, devicetree):
    """Get the ThinLV snapshot device.

    :param request: a snapshot request
    :param devicetree: a device tree to look up devices
    :return: a model of the ThinLV snapshot
    :raises KickstartParseError: if the origin is missing/invalid or the
        snapshot already exists
    """
    # device-mapper doubles hyphens in LV names and joins vg/lv with '-'
    snap_name = request.name.replace('-', '--')
    origin = request.origin.replace('-', '--').replace('/', '-')
    origin_dev = devicetree.get_device_by_name(origin)
    log.debug("Snapshot: name %s has origin %s", request.name, origin_dev)

    if origin_dev is None:
        # message fixed: "doesn't exists" -> "doesn't exist" (matches the
        # wording used by the storage module variant of this function)
        raise KickstartParseError(
            _("Snapshot: origin \"%s\" doesn't exist!") % request.origin,
            lineno=request.lineno)

    if not origin_dev.is_thin_lv:
        raise KickstartParseError(
            _("Snapshot: origin \"%(origin)s\" of snapshot "
              "\"%(name)s\" is not a valid thin LV device.")
            % {"origin": request.origin, "name": request.name},
            lineno=request.lineno)

    if devicetree.get_device_by_name("%s-%s" % (origin_dev.vg.name, snap_name)):
        raise KickstartParseError(
            _("Snapshot %s already exists.") % request.name,
            lineno=request.lineno)
    try:
        return LVMLogicalVolumeDevice(name=request.name,
                                      parents=[origin_dev.pool],
                                      seg_type="thin",
                                      origin=origin_dev)
    except ValueError as e:
        # chain the original error for debuggability (parity with the
        # StorageError-based variant of this helper)
        raise KickstartParseError(str(e), lineno=request.lineno) from e
def test_lvm_logical_volume_mirror(self):
    """A 'mirror' LV is treated as RAID1 across its two PVs."""
    get_fmt = blivet.formats.get_format
    pv_a = StorageDevice("pv1", fmt=get_fmt("lvmpv"), size=Size("1025 MiB"))
    pv_b = StorageDevice("pv2", fmt=get_fmt("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv_a, pv_b])

    mirror_lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                       size=Size("512 MiB"),
                                       fmt=get_fmt("xfs"), exists=False,
                                       seg_type="mirror", pvs=[pv_a, pv_b])
    self.assertEqual(mirror_lv.seg_type, "mirror")

    # 512 MiB - 4 MiB (metadata)
    self.assertEqual(mirror_lv.size, Size("508 MiB"))

    self.assertEqual(mirror_lv._raid_level, raid.RAID1)
    self.assertTrue(mirror_lv.is_raid_lv)
    self.assertEqual(mirror_lv._num_raid_pvs, 2)
def generate_device_factory_request_test(self, blockdev):
    """Check the factory requests generated for several device kinds."""
    # A plain StorageDevice is not supported by the device factory.
    device = StorageDevice("dev1")
    with self.assertRaises(UnsupportedDeviceError):
        utils.generate_device_factory_request(self.storage, device)

    # A bare disk yields an empty DISK-type request.
    disk = DiskDevice("dev2")
    request = utils.generate_device_factory_request(self.storage, disk)
    expected_disk = {
        "device-spec": get_variant(Str, "dev2"),
        "disks": get_variant(List[Str], ["dev2"]),
        "mount-point": get_variant(Str, ""),
        "reformat": get_variant(Bool, False),
        "format-type": get_variant(Str, ""),
        "label": get_variant(Str, ""),
        "luks-version": get_variant(Str, ""),
        "device-type": get_variant(Int, devicefactory.DEVICE_TYPE_DISK),
        "device-name": get_variant(Str, "dev2"),
        "device-size": get_variant(UInt64, 0),
        "device-encrypted": get_variant(Bool, False),
        "device-raid-level": get_variant(Str, ""),
        "container-spec": get_variant(Str, ""),
        "container-name": get_variant(Str, ""),
        "container-size-policy": get_variant(Int64, devicefactory.SIZE_POLICY_AUTO),
        "container-encrypted": get_variant(Bool, False),
        "container-raid-level": get_variant(Str, ""),
    }
    self.assertEqual(DeviceFactoryRequest.to_structure(request), expected_disk)

    # A formatted partition carries its mount point and label.
    partition = PartitionDevice(
        "dev3",
        size=Size("5 GiB"),
        parents=[disk],
        fmt=get_format("ext4", mountpoint="/", label="root")
    )
    request = utils.generate_device_factory_request(self.storage, partition)
    expected_partition = {
        "device-spec": get_variant(Str, "dev3"),
        "disks": get_variant(List[Str], ["dev2"]),
        "mount-point": get_variant(Str, "/"),
        "reformat": get_variant(Bool, True),
        "format-type": get_variant(Str, "ext4"),
        "label": get_variant(Str, "root"),
        "luks-version": get_variant(Str, ""),
        "device-type": get_variant(Int, devicefactory.DEVICE_TYPE_PARTITION),
        "device-name": get_variant(Str, "dev3"),
        "device-size": get_variant(UInt64, Size("5 GiB").get_bytes()),
        "device-encrypted": get_variant(Bool, False),
        "device-raid-level": get_variant(Str, ""),
        "container-spec": get_variant(Str, ""),
        "container-name": get_variant(Str, ""),
        "container-size-policy": get_variant(Int64, devicefactory.SIZE_POLICY_AUTO),
        "container-encrypted": get_variant(Bool, False),
        "container-raid-level": get_variant(Str, ""),
    }
    self.assertEqual(DeviceFactoryRequest.to_structure(request), expected_partition)

    # A RAID1 LV reports its container and the usable (post-metadata) size.
    pv1 = StorageDevice("pv1", size=Size("1025 MiB"), fmt=get_format("lvmpv"))
    pv2 = StorageDevice("pv2", size=Size("513 MiB"), fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv1, pv2])
    lv = LVMLogicalVolumeDevice(
        "testlv",
        size=Size("512 MiB"),
        parents=[vg],
        fmt=get_format("xfs"),
        exists=False,
        seg_type="raid1",
        pvs=[pv1, pv2]
    )
    request = utils.generate_device_factory_request(self.storage, lv)
    expected_lv = {
        "device-spec": get_variant(Str, "testvg-testlv"),
        "disks": get_variant(List[Str], []),
        "mount-point": get_variant(Str, ""),
        "reformat": get_variant(Bool, True),
        "format-type": get_variant(Str, "xfs"),
        "label": get_variant(Str, ""),
        "luks-version": get_variant(Str, ""),
        "device-type": get_variant(Int, devicefactory.DEVICE_TYPE_LVM),
        "device-name": get_variant(Str, "testlv"),
        "device-size": get_variant(UInt64, Size("508 MiB").get_bytes()),
        "device-encrypted": get_variant(Bool, False),
        "device-raid-level": get_variant(Str, ""),
        "container-spec": get_variant(Str, "testvg"),
        "container-name": get_variant(Str, "testvg"),
        "container-size-policy": get_variant(Int64, Size("1.5 GiB")),
        "container-encrypted": get_variant(Bool, False),
        "container-raid-level": get_variant(Str, ""),
    }
    self.assertEqual(DeviceFactoryRequest.to_structure(request), expected_lv)
def test_lvmthin_snap_shot_device_init(self):
    """Check constructor constraints and properties of thin LV snapshots."""
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"),
                                  seg_type="thin-pool")
    thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool],
                                    size=Size("200 MiB"), seg_type="thin")

    with self.assertRaisesRegex(
            ValueError, "lvm snapshot origin volume must already exist"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv,
                               seg_type="thin")

    with self.assertRaisesRegex(
            ValueError, "lvm snapshot origin must be a logical volume"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv,
                               seg_type="thin")

    # With an existing origin the constructor succeeds and we can
    # inspect the resulting snapshot.
    thinlv.exists = True
    snap = LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv,
                                  seg_type="thin")

    self.assertEqual(snap.isleaf, True)
    self.assertEqual(snap.direct, True)
    self.assertEqual(thinlv.isleaf, True)
    self.assertEqual(thinlv.direct, True)

    self.assertEqual(snap.depends_on(thinlv), True)
    self.assertEqual(thinlv.depends_on(snap), False)

    # existing thin snapshots do not depend on their origin
    snap.exists = True
    self.assertEqual(snap.depends_on(thinlv), False)
def test_new_lv_from_non_existing_lvs(self):
    # same test as above, just with non-existing LVs used to create the new one
    b = blivet.Blivet()
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
    data_lv = LVMLogicalVolumeDevice("data_lv", parents=[vg],
                                     size=Size("500 MiB"), exists=False)
    metadata_lv = LVMLogicalVolumeDevice("metadata_lv", parents=[vg],
                                         size=Size("50 MiB"), exists=False)
    for dev in (pv, vg, data_lv, metadata_lv):
        b.devicetree._add_device(dev)

    # sanity-check the starting layout
    self.assertEqual(set(b.devices), {pv, vg, data_lv, metadata_lv})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {data_lv, metadata_lv})
    self.assertEqual(set(b.vgs[0].lvs), {data_lv, metadata_lv})

    self.assertEqual(vg.size, Size("1020 MiB"))
    self.assertEqual(data_lv.size, Size("500 MiB"))
    self.assertEqual(metadata_lv.size, Size("50 MiB"))

    # combine the two LVs into a thin pool (the LVs should become its internal LVs)
    pool = b.new_lv_from_lvs(vg, name="pool", seg_type="thin-pool",
                             from_lvs=(data_lv, metadata_lv))

    # add the pool LV into the devicetree
    b.devicetree._add_device(pool)

    self.assertEqual(set(b.devices), {pv, vg, pool})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs[0]._internal_lvs), {data_lv, metadata_lv})

    self.assertTrue(data_lv.is_internal_lv)
    self.assertEqual(data_lv.int_lv_type, LVMInternalLVtype.data)
    self.assertEqual(data_lv.size, Size("500 MiB"))
    self.assertTrue(metadata_lv.is_internal_lv)
    self.assertEqual(metadata_lv.int_lv_type, LVMInternalLVtype.meta)
    self.assertEqual(metadata_lv.size, Size("50 MiB"))

    self.assertTrue(pool.depends_on(data_lv))
    self.assertTrue(pool.depends_on(metadata_lv))

    self.assertEqual(pool.name, "testvg-pool")
    self.assertEqual(pool.size, Size("500 MiB"))
    self.assertEqual(pool.metadata_size, Size("50 MiB"))
    self.assertIs(pool.vg, vg)

    # both component LVs don't exist -> creation must fail
    with self.assertRaises(errors.DeviceError):
        with patch("blivet.devices.lvm.blockdev.lvm"):
            pool.create()

    # only the data LV exists -> creation must still fail
    data_lv.exists = True
    with self.assertRaises(errors.DeviceError):
        with patch("blivet.devices.lvm.blockdev.lvm"):
            pool.create()

    # both component LVs exist, should just work
    metadata_lv.exists = True
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(pool, "_pre_create"):
            pool.create()
    self.assertTrue(lvm.thpool_convert.called)
def test_vgchunk(self):
    """Check VGChunk accounting and its growth distribution."""
    pv = StorageDevice("pv1", size=Size("40 GiB"), fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("vg", parents=[pv])
    lv1 = LVMLogicalVolumeDevice("lv1", parents=[vg], size=Size("1 GiB"),
                                 grow=True)
    lv2 = LVMLogicalVolumeDevice("lv2", parents=[vg], size=Size("10 GiB"),
                                 grow=True)
    lv3 = LVMLogicalVolumeDevice("lv3", parents=[vg], size=Size("10 GiB"),
                                 grow=True, maxsize=Size("12 GiB"))

    req1 = LVRequest(lv1)
    req2 = LVRequest(lv2)
    req3 = LVRequest(lv3)
    chunk = VGChunk(vg, requests=[req1, req2, req3])

    self.assertEqual(chunk.length, vg.extents)
    self.assertEqual(chunk.pool, vg.free_extents)

    # The base is the sum of the LV sizes, aligned up to whole extents.
    base_size = vg.align(sum((lv.size for lv in vg.lvs), Size(0)), roundup=True)
    base = base_size / vg.pe_size
    self.assertEqual(chunk.base, base)

    # default extent size is 4 MiB
    self.assertEqual(chunk.length_to_size(4), Size("16 MiB"))
    self.assertEqual(chunk.size_to_length(Size("33 MiB")), 8)
    self.assertEqual(chunk.has_growable, True)

    self.assertEqual(chunk.remaining, 3)
    self.assertEqual(chunk.done, False)

    chunk.grow_requests()

    # the chunk is done growing since its pool has been exhausted
    self.assertEqual(chunk.done, True)

    # there are still two requests remaining since lv1 and lv2 have no max
    self.assertEqual(chunk.remaining, 2)

    #
    # validate the resulting growth
    #
    # lv1 has size 1 GiB (256 extents) and is growable with no limit.
    # lv2 has size 10 GiB (2560 extents) and is growable with no limit.
    # lv3 has size 10 GiB (2560 extents) and is growable with a max size
    # of 12 GiB (max growth of 512 extents).
    #
    # The vg initially has 4863 free extents; the growth ratio is 1:10:10.
    #
    # The first pass allocates 231 extents to lv1 and 2315 extents to
    # each of lv2 and lv3, leaving one extent over, but 1803 extents are
    # reclaimed from lv3 since it may only grow by 512 extents (2 GiB).
    #
    # The second pass splits those 1805 extents between lv1 and lv2 at a
    # 1:10 ratio — 164 for lv1 and 1640 for lv2 — and the one remaining
    # extent goes to lv2 because it is first in the list after sorting
    # with blivet.partitioning.lv_compare.
    #
    # Grand totals:
    #   lv1 grows by 395 extents (1.54 GiB)
    #   lv2 grows by 3956 extents (15.45 GiB)
    #   lv3 grows by 512 extents (2 GiB)
    self.assertEqual(req1.growth, 395)
    self.assertEqual(req2.growth, 3956)
    self.assertEqual(req3.growth, 512)