def testVolumeGroup(self):
    good_names = ['vg00', 'group-name', 'groupname-']
    bad_names = ['-leading-hyphen', 'únicode', 'sp aces']

    for name in good_names:
        self.assertTrue(LVMVolumeGroupDevice.isNameValid(name))

    for name in bad_names:
        self.assertFalse(LVMVolumeGroupDevice.isNameValid(name))
def _createVG(vgName, deviceList, stripeSize=0):
    if stripeSize:
        vg = LVMVolumeGroupDevice(vgName,
                                  peSize=blivet.size.Size('%s KiB' % stripeSize),
                                  parents=deviceList)
    else:
        vg = LVMVolumeGroupDevice(vgName, parents=deviceList)
    blivetEnv.createDevice(vg)
    return vg
def test_volume_group(self):
    good_names = ['vg00', 'group-name', 'groupname-']
    bad_names = ['-leading-hyphen', 'únicode', 'sp aces']

    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])

    for name in good_names:
        self.assertTrue(vg.is_name_valid(name))

    for name in bad_names:
        self.assertFalse(vg.is_name_valid(name))
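# A minimal standalone sketch of the naming rules the lists above encode:
# no leading hyphen, ASCII only, no whitespace. The regex is inferred from
# the test data for illustration; it is not blivet's actual implementation.
import re

_VG_NAME_RE = re.compile(r'^[a-zA-Z0-9_.+][a-zA-Z0-9_.+-]*$')

def _looks_like_valid_vg_name(name):
    # accepts 'vg00', 'group-name', 'groupname-';
    # rejects '-leading-hyphen', 'únicode', 'sp aces'
    return bool(_VG_NAME_RE.match(name))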
def test_new_lv_from_lvs(self):
    b = blivet.Blivet()
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
    lv1 = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True)
    lv2 = LVMLogicalVolumeDevice("metadata_lv", parents=[vg], size=Size("50 MiB"), exists=True)

    for dev in (pv, vg, lv1, lv2):
        b.devicetree._add_device(dev)

    # check that all the above devices are in the expected places
    self.assertEqual(set(b.devices), {pv, vg, lv1, lv2})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {lv1, lv2})
    self.assertEqual(set(b.vgs[0].lvs), {lv1, lv2})

    self.assertEqual(vg.size, Size("1020 MiB"))
    self.assertEqual(lv1.size, Size("500 MiB"))
    self.assertEqual(lv2.size, Size("50 MiB"))

    # combine the two LVs into a thin pool (the LVs should become its internal LVs)
    pool = b.new_lv_from_lvs(vg, name="pool", seg_type="thin-pool", from_lvs=(lv1, lv2))

    # add the pool LV into the devicetree
    b.devicetree._add_device(pool)

    self.assertEqual(set(b.devices), {pv, vg, pool})
    self.assertEqual(set(b.vgs), {vg})
    self.assertEqual(set(b.lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs), {pool})
    self.assertEqual(set(b.vgs[0].lvs[0]._internal_lvs), {lv1, lv2})

    self.assertTrue(lv1.is_internal_lv)
    self.assertEqual(lv1.int_lv_type, LVMInternalLVtype.data)
    self.assertEqual(lv1.size, Size("500 MiB"))
    self.assertTrue(lv2.is_internal_lv)
    self.assertEqual(lv2.int_lv_type, LVMInternalLVtype.meta)
    self.assertEqual(lv2.size, Size("50 MiB"))

    self.assertEqual(pool.name, "testvg-pool")
    self.assertEqual(pool.size, Size("500 MiB"))
    self.assertEqual(pool.metadata_size, Size("50 MiB"))
    self.assertIs(pool.vg, vg)

    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(pool, "_pre_create"):
            pool.create()
            self.assertTrue(lvm.thpool_convert.called)
def setUp(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    self.lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                     fmt=blivet.formats.get_format("xfs"))

    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("1 GiB"))
    pv3 = StorageDevice("pv3", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("1 GiB"))
    vg2 = LVMVolumeGroupDevice("testvg2", parents=[pv2, pv3])
    cache_req = LVMCacheRequest(Size("512 MiB"), [pv3], "writethrough")
    self.cached_lv = LVMLogicalVolumeDevice("testcachedlv", parents=[vg2],
                                            fmt=blivet.formats.get_format("xfs"),
                                            exists=False, cache_request=cache_req)
def test_lvmcached_two_logical_volume_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    cache_req = LVMCacheRequest(Size("256 MiB"), [pv2], "writethrough")
    lv1 = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                 fmt=blivet.formats.get_format("xfs"),
                                 exists=False, cache_request=cache_req)

    cache_req = LVMCacheRequest(Size("256 MiB"), [pv2], "writethrough")
    lv2 = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                 fmt=blivet.formats.get_format("xfs"),
                                 exists=False, cache_request=cache_req)

    cache = lv1.cache
    self.assertIsNotNone(cache)
    # 256 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
    self.assertEqual(cache.size, Size("240 MiB"))

    cache = lv2.cache
    self.assertIsNotNone(cache)
    # already have pmspare space reserved for lv1's cache (and shared)
    # 256 MiB - 8 MiB (metadata) [no pmspare]
    self.assertEqual(cache.size, Size("248 MiB"))
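# Hedged sketch of the arithmetic asserted above, assuming (per the test's
# own comments) a fixed 8 MiB cache-metadata reservation and one shared
# 8 MiB pmspare LV per VG. The helper name is hypothetical.
def _expected_cache_size(requested, pmspare_already_reserved):
    md_size = Size("8 MiB")  # per-cache metadata taken out of the request
    pmspare = Size("0 MiB") if pmspare_already_reserved else Size("8 MiB")
    return requested - md_size - pmspare

assert _expected_cache_size(Size("256 MiB"), False) == Size("240 MiB")  # lv1
assert _expected_cache_size(Size("256 MiB"), True) == Size("248 MiB")   # lv2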
def test_get_related_disks(self):
    tree = DeviceTree()
    sda = DiskDevice("sda", size=Size("300 GiB"))
    sdb = DiskDevice("sdb", size=Size("300 GiB"))
    sdc = DiskDevice("sdc", size=Size("300 GiB"))
    tree._add_device(sda)
    tree._add_device(sdb)
    tree._add_device(sdc)

    self.assertTrue(sda in tree.devices)
    self.assertTrue(sdb in tree.devices)
    self.assertTrue(sdc in tree.devices)

    sda.format = get_format("lvmpv", device=sda.path)
    sdb.format = get_format("lvmpv", device=sdb.path)
    vg = LVMVolumeGroupDevice("relvg", parents=[sda, sdb])
    tree._add_device(vg)

    self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
    self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
    self.assertEqual(tree.get_related_disks(sdc), set())

    # related disks should be reported regardless of the hidden status
    tree.hide(sda)
    self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
    self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
    tree.hide(sdb)
    self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
    self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))

    tree.unhide(sda)
    self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
    self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
def test_lvmsnap_shot_device_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                fmt=blivet.formats.get_format("xfs"))

    with self.assertRaisesRegex(ValueError, "lvm snapshot origin volume must already exist"):
        LVMLogicalVolumeDevice("snap1", parents=[vg], origin=lv)

    with self.assertRaisesRegex(ValueError, "lvm snapshot origin must be a logical volume"):
        LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)

    with self.assertRaisesRegex(ValueError, "only existing vorigin snapshots are supported"):
        LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)

    lv.exists = True
    snap1 = LVMLogicalVolumeDevice("snap1", parents=[vg], origin=lv)
    self.assertEqual(snap1.format.type, lv.format.type)
    lv.format = blivet.formats.get_format("DM_snapshot_cow", exists=True)
    self.assertEqual(snap1.format.type, lv.format.type)

    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(lv.isleaf, False)
    self.assertEqual(lv.direct, True)

    self.assertEqual(snap1.depends_on(lv), True)
    self.assertEqual(lv.depends_on(snap1), False)
def test_lvm_logical_volume_segtype_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    with self.assertRaises(ValueError):
        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False, seg_type="raid8", pvs=[pv, pv2])

    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=blivet.formats.get_format("xfs"),
                                exists=False, seg_type="striped", pvs=[pv, pv2])
    self.assertEqual(lv.seg_type, "striped")
def check_completeness_test(self):
    """Test CheckCompleteness."""
    dev1 = StorageDevice("dev1", fmt=get_format("ext4"),
                         size=Size("10 GiB"), exists=True)
    dev2 = MDRaidArrayDevice(name="dev2", size=Size("500 MiB"),
                             level=1, member_devices=2, total_devices=2,
                             exists=True)
    dev3 = LVMVolumeGroupDevice("dev3", pv_count=2, exists=True)

    self._add_device(dev1)
    self._add_device(dev2)
    self._add_device(dev3)

    self._check_report(self.interface.CheckCompleteness("dev1"))
    self._check_report(
        self.interface.CheckCompleteness("dev2"),
        "This Software RAID array is missing 2 of 2 member partitions. "
        "You can remove it or select a different device."
    )
    self._check_report(
        self.interface.CheckCompleteness("dev3"),
        "This LVM Volume Group is missing 2 of 2 physical volumes. "
        "You can remove it or select a different device."
    )

    dev1.complete = False
    self._check_report(
        self.interface.CheckCompleteness("dev1"),
        "This blivet device is missing member devices. "
        "You can remove it or select a different device."
    )
def update_container_data_test(self):
    """Test UpdateContainerData."""
    pv1 = StorageDevice(
        "pv1",
        size=Size("1025 MiB"),
        fmt=get_format("lvmpv")
    )
    pv2 = StorageDevice(
        "pv2",
        size=Size("513 MiB"),
        fmt=get_format("lvmpv")
    )
    vg = LVMVolumeGroupDevice(
        "testvg",
        parents=[pv1, pv2]
    )

    self._add_device(pv1)
    self._add_device(pv2)
    self._add_device(vg)

    request = DeviceFactoryRequest()
    request.device_type = DEVICE_TYPE_PARTITION

    with self.assertRaises(StorageError):
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request),
            "anaconda"
        )

    request.device_type = DEVICE_TYPE_BTRFS
    request = DeviceFactoryRequest.from_structure(
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request),
            "anaconda"
        )
    )

    self.assertEqual(request.container_spec, "")
    self.assertEqual(request.container_name, "anaconda")
    self.assertEqual(request.container_encrypted, False)
    self.assertEqual(request.container_raid_level, "single")
    self.assertEqual(request.container_size_policy, 0)
    self.assertEqual(request.disks, [])

    request.device_type = DEVICE_TYPE_LVM
    request = DeviceFactoryRequest.from_structure(
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request),
            "testvg"
        )
    )

    self.assertEqual(request.container_spec, "testvg")
    self.assertEqual(request.container_name, "testvg")
    self.assertEqual(request.container_encrypted, False)
    self.assertEqual(request.container_raid_level, "")
    self.assertEqual(request.container_size_policy, Size("1.5 GiB").get_bytes())
    self.assertEqual(request.disks, [])
def setUp(self):
    disk1 = DiskDevice("testdisk", size=Size("300 GiB"), exists=True,
                       fmt=get_format("disklabel", exists=True))
    disk1.format._supported = False

    with self.assertLogs("blivet", level="INFO") as cm:
        partition1 = PartitionDevice("testpart1", size=Size("150 GiB"), exists=True,
                                     parents=[disk1], fmt=get_format("ext4", exists=True))
    self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))

    with self.assertLogs("blivet", level="INFO") as cm:
        partition2 = PartitionDevice("testpart2", size=Size("100 GiB"), exists=True,
                                     parents=[disk1], fmt=get_format("lvmpv", exists=True))
    self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))

    # To be supported, all of a device's ancestors must be supported.
    disk2 = DiskDevice("testdisk2", size=Size("300 GiB"), exists=True,
                       fmt=get_format("lvmpv", exists=True))
    vg = LVMVolumeGroupDevice("testvg", exists=True, parents=[partition2, disk2])
    lv = LVMLogicalVolumeDevice("testlv", exists=True, size=Size("64 GiB"),
                                parents=[vg], fmt=get_format("ext4", exists=True))

    with sparsetmpfile("addparttest", Size("50 MiB")) as disk_file:
        disk3 = DiskFile(disk_file)
        disk3.format = get_format("disklabel", device=disk3.path, exists=False)

    self.disk1 = disk1
    self.disk2 = disk2
    self.disk3 = disk3
    self.partition1 = partition1
    self.partition2 = partition2
    self.vg = vg
    self.lv = lv
def test_vg_is_empty(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1024 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])

    self.assertTrue(vg.is_empty)
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           fmt=blivet.formats.get_format("xfs"), exists=False)
    self.assertFalse(vg.is_empty)
def test_vgchunk_with_cache_pvfree(self):
    pv = StorageDevice("pv1", size=Size("40 GiB"),
                       fmt=get_format("lvmpv"))
    # 1069 MiB so that the PV provides 1068 MiB of free space (see
    # LVMVolumeGroupDevice.extents) which is 44 MiB more than the caches
    # need and which should thus be split into the LVs
    pv2 = StorageDevice("pv2", size=Size("1069 MiB"),
                        fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("vg", parents=[pv, pv2])
    cache_req1 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
    lv1 = LVMLogicalVolumeDevice("lv1", parents=[vg], size=Size("1 GiB"),
                                 grow=True, cache_request=cache_req1)
    cache_req2 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
    lv2 = LVMLogicalVolumeDevice("lv2", parents=[vg], size=Size("10 GiB"),
                                 grow=True, cache_request=cache_req2)
    lv3 = LVMLogicalVolumeDevice("lv3", parents=[vg], size=Size("10 GiB"),
                                 grow=True, maxsize=Size("12 GiB"))

    req1 = LVRequest(lv1)
    req2 = LVRequest(lv2)
    req3 = LVRequest(lv3)
    chunk = VGChunk(vg, requests=[req1, req2, req3])

    chunk.grow_requests()

    # the chunk is done growing since its pool has been exhausted
    self.assertEqual(chunk.done, True)

    # there are still two requests remaining since lv1 and lv2 have no max
    self.assertEqual(chunk.remaining, 2)

    # All the sizes should be the same as without the caches (see the
    # test_vgchunk test for their "rationales") because the space for the
    # caches should just be reserved.
    # The extra 11 extents available on pv2 should go in the 1:10 ratio
    # to req1 and req2.
    self.assertEqual(req1.growth, 395 + 1)
    self.assertEqual(req2.growth, 3956 + 10)
    self.assertEqual(req3.growth, 512)
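# Where the "extra 11 extents" figure above comes from (a worked check, not
# part of the original test): pv2 provides 1068 MiB of usable space, the two
# 512 MiB cache requests reserve 1024 MiB, and the remaining 44 MiB is
# 11 extents at the default 4 MiB extent size, split 1:10 between req1/req2.
assert Size("1068 MiB") - (Size("512 MiB") + Size("512 MiB")) == Size("44 MiB")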
def test_device_name(self):
    # check that the devicetree.names property contains all devices' names
    # mock lvs_info to avoid a blockdev call, allowing the test to run as non-root
    with patch.object(LVsInfo, 'cache', new_callable=PropertyMock) as mock_lvs_cache:
        mock_lvs_cache.return_value = {"sdmock": "dummy", "testvg-testlv": "dummy"}
        tree = DeviceTree()

        dev_names = ["sda", "sdb", "sdc"]
        for dev_name in dev_names:
            dev = DiskDevice(dev_name, size=Size("1 GiB"))
            tree._add_device(dev)
            self.assertTrue(dev in tree.devices)
            self.assertTrue(dev.name in tree.names)

        dev.format = get_format("lvmpv", device=dev.path)
        vg = LVMVolumeGroupDevice("testvg", parents=[dev])
        tree._add_device(vg)
        dev_names.append(vg.name)

        lv = LVMLogicalVolumeDevice("testlv", parents=[vg])
        tree._add_device(lv)
        dev_names.append(lv.name)

        # frobnicate a bit with the hidden status of the devices:
        # * hide sda
        # * hide and unhide again sdb
        # * leave sdc unchanged
        tree.hide(tree.get_device_by_name("sda"))
        tree.hide(tree.get_device_by_name("sdb"))
        tree.unhide(tree.get_device_by_name("sdb", hidden=True))

        # some lvs names may be already present in the system (mocked)
        lv_info = list(lvs_info.cache.keys())

        # all devices should still be present in tree.names
        self.assertEqual(set(tree.names), set(lv_info + dev_names))

        # "remove" the LV, it should no longer be in the list
        tree.actions._actions.append(Mock(device=lv,
                                          type=ACTION_TYPE_DESTROY,
                                          obj=ACTION_OBJECT_DEVICE))
        tree._remove_device(lv)
        self.assertFalse(lv.name in tree.names)
def test_lvm_inconsistent_sector_size(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1024 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("1024 MiB"))

    with patch("blivet.devices.StorageDevice.sector_size", new_callable=PropertyMock) as mock_property:
        # make the two PVs report different sector sizes
        mock_property.__get__ = lambda _mock, pv, _class: 512 if pv.name == "pv1" else 4096
        with six.assertRaisesRegex(self, ValueError, "The volume group testvg cannot be created."):
            LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
def test_target_size(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    orig_size = Size("800 MiB")
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=orig_size,
                                fmt=blivet.formats.get_format("ext4"),
                                exists=True)

    min_size = Size("200 MiB")
    lv.format.exists = True
    lv.format._min_instance_size = min_size
    lv.format._resizable = True

    # Make sure things are as expected to begin with.
    self.assertEqual(lv.min_size, min_size)
    self.assertEqual(lv.max_size, Size("1020 MiB"))
    self.assertEqual(lv.size, orig_size)

    # ValueError if size smaller than min_size
    with self.assertRaisesRegex(ValueError, "size.*smaller than the minimum"):
        lv.target_size = Size("1 MiB")

    # target size should be unchanged
    self.assertEqual(lv.target_size, orig_size)

    # ValueError if size larger than max_size
    with self.assertRaisesRegex(ValueError, "size.*larger than the maximum"):
        lv.target_size = Size("1 GiB")

    # target size should be unchanged
    self.assertEqual(lv.target_size, orig_size)

    # successful set of target size should also be reflected in size attr
    new_target = Size("900 MiB")
    lv.target_size = new_target
    self.assertEqual(lv.target_size, new_target)
    self.assertEqual(lv.size, new_target)

    # reset target size to original size
    lv.target_size = orig_size
    self.assertEqual(lv.target_size, orig_size)
    self.assertEqual(lv.size, orig_size)
def test_get_container_free_space(self):
    """Test GetContainerFreeSpace."""
    dev1 = StorageDevice("dev1", fmt=get_format("lvmpv"), size=Size("10 GiB"))
    dev2 = LVMVolumeGroupDevice("dev2", parents=[dev1])

    self._add_device(dev1)
    self._add_device(dev2)

    free_space = self.interface.GetContainerFreeSpace("dev1")
    assert free_space == 0

    free_space = self.interface.GetContainerFreeSpace("dev2")
    assert free_space > Size("9 GiB").get_bytes()
    assert free_space < Size("10 GiB").get_bytes()
def get_container_free_space_test(self):
    """Test GetContainerFreeSpace."""
    dev1 = StorageDevice("dev1", fmt=get_format("lvmpv"), size=Size("10 GiB"))
    dev2 = LVMVolumeGroupDevice("dev2", parents=[dev1])

    self._add_device(dev1)
    self._add_device(dev2)

    free_space = self.interface.GetContainerFreeSpace("dev1")
    self.assertEqual(free_space, 0)

    free_space = self.interface.GetContainerFreeSpace("dev2")
    self.assertGreater(free_space, Size("9 GiB").get_bytes())
    self.assertLess(free_space, Size("10 GiB").get_bytes())
def test_logical_volume(self):
    good_names = ['lv00', 'volume-name', 'volumename-']
    bad_names = ['-leading-hyphen', 'únicode', 'sp aces',
                 'snapshot47', 'pvmove0', 'sub_tmetastring']

    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                fmt=blivet.formats.get_format("xfs"))

    for name in good_names:
        self.assertTrue(lv.is_name_valid(name))

    for name in bad_names:
        self.assertFalse(lv.is_name_valid(name))
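# Beyond the VG naming rules, the bad list above also exercises LVM's
# reserved LV names: names beginning with 'snapshot' or 'pvmove' and names
# containing internal-LV substrings such as '_tmeta' are rejected. This
# summary is inferred from the test data and lvm(8) naming conventions.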
def test_lvmthin_snap_shot_device_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv])
    pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"),
                                  seg_type="thin-pool")
    thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"),
                                    seg_type="thin")

    with self.assertRaisesRegex(ValueError, "lvm snapshot origin volume must already exist"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv, seg_type="thin")

    with self.assertRaisesRegex(ValueError, "lvm snapshot origin must be a logical volume"):
        LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")

    # now make the constructor succeed so we can test some properties
    thinlv.exists = True
    snap1 = LVMLogicalVolumeDevice("snap1", parents=[pool], origin=thinlv,
                                   seg_type="thin")
    self.assertEqual(snap1.isleaf, True)
    self.assertEqual(snap1.direct, True)
    self.assertEqual(thinlv.isleaf, True)
    self.assertEqual(thinlv.direct, True)

    self.assertEqual(snap1.depends_on(thinlv), True)
    self.assertEqual(thinlv.depends_on(snap1), False)

    # existing thin snapshots do not depend on their origin
    snap1.exists = True
    self.assertEqual(snap1.depends_on(thinlv), False)
def test_skip_activate(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
    lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"),
                                exists=True)

    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(lv, "_pre_setup"):
            lv.setup()
            lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)

    lv.ignore_skip_activation += 1
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(lv, "_pre_setup"):
            lv.setup()
            lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=True)

    lv.ignore_skip_activation += 1
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(lv, "_pre_setup"):
            lv.setup()
            lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=True)

    lv.ignore_skip_activation -= 2
    with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
        with patch.object(lv, "_pre_setup"):
            lv.setup()
            lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)
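# Note on the counter semantics exercised above: ignore_skip_activation is a
# nesting count rather than a boolean -- any value greater than zero makes
# setup() pass ignore_skip=True, and decrementing back to zero restores the
# default behavior.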
def test_lvm_logical_volume_with_pvs_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    pv_spec = LVPVSpec(pv, Size("1 GiB"))
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=blivet.formats.get_format("xfs"),
                                exists=False, pvs=[pv_spec])

    self.assertEqual([spec.pv for spec in lv._pv_specs], [pv])
def _createVG(vgName, deviceList, stripeSize=0):
    if stripeSize:
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = utils.execCmd([_vgCreateCommandPath.cmd,
                                      '-s', '%sk' % stripeSize,
                                      vgName, devices])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        vg = blivetEnv.devicetree.getDeviceByName(vgName)
    else:
        vg = LVMVolumeGroupDevice(vgName, parents=deviceList)
        blivetEnv.createDevice(vg)
    return vg
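# Hypothetical usage sketch (the device names and blivetEnv context are
# assumptions, not part of the function above):
#
#     disks = [blivetEnv.devicetree.getDeviceByName(n) for n in ("sdb", "sdc")]
#     vg = _createVG("gluster_vg", disks, stripeSize=256)  # 256 KiB PE size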
def test_update_container_data(self):
    """Test UpdateContainerData."""
    pv1 = StorageDevice("pv1", size=Size("1025 MiB"), fmt=get_format("lvmpv"))
    pv2 = StorageDevice("pv2", size=Size("513 MiB"), fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv1, pv2])

    self._add_device(pv1)
    self._add_device(pv2)
    self._add_device(vg)

    request = DeviceFactoryRequest()
    request.device_type = DEVICE_TYPE_PARTITION

    with pytest.raises(StorageError):
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request), "anaconda")

    request.device_type = DEVICE_TYPE_BTRFS
    request = DeviceFactoryRequest.from_structure(
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request), "anaconda"))

    assert request.container_spec == ""
    assert request.container_name == "anaconda"
    assert request.container_encrypted is False
    assert request.container_raid_level == "single"
    assert request.container_size_policy == 0
    assert request.disks == []

    request.device_type = DEVICE_TYPE_LVM
    request = DeviceFactoryRequest.from_structure(
        self.interface.UpdateContainerData(
            DeviceFactoryRequest.to_structure(request), "testvg"))

    assert request.container_spec == "testvg"
    assert request.container_name == "testvg"
    assert request.container_encrypted is False
    assert request.container_raid_level == ""
    assert request.container_size_policy == Size("1.5 GiB").get_bytes()
    assert request.disks == []
def test_lvmcached_logical_volume_init(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("512 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    cache_req = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
    xfs_fmt = blivet.formats.get_format("xfs")
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], fmt=xfs_fmt,
                                size=Size(xfs_fmt.min_size), exists=False,
                                cache_request=cache_req)
    self.assertEqual(lv.size, xfs_fmt.min_size)

    # check that the LV behaves like a cached LV
    self.assertTrue(lv.cached)
    cache = lv.cache
    self.assertIsNotNone(cache)

    # the cache reserves space for its metadata from the requested size, but
    # it may require (and does in this case) a pmspare LV to be allocated
    self.assertEqual(lv.vg_space_used, lv.cache.size + lv.cache.md_size + lv.size)

    # check parameters reported by the (non-existing) cache
    # 512 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
    self.assertEqual(cache.size, Size("496 MiB"))
    self.assertEqual(cache.md_size, Size("8 MiB"))
    self.assertEqual(cache.vg_space_used, Size("504 MiB"))
    self.assertIsInstance(cache.size, Size)
    self.assertIsInstance(cache.md_size, Size)
    self.assertIsInstance(cache.vg_space_used, Size)

    self.assertFalse(cache.exists)
    self.assertIsNone(cache.stats)
    self.assertEqual(cache.mode, "writethrough")
    self.assertIsNone(cache.backing_device_name)
    self.assertIsNone(cache.cache_device_name)
    self.assertEqual(set(cache.fast_pvs), set([pv2]))
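# The space accounting asserted above, restated (values come from the test's
# own comments; this restatement is an illustration, not blivet API):
#
#     requested 512 MiB -> 8 MiB cache metadata + 8 MiB shared pmspare
#     cache.size          = 512 - 8 - 8          = 496 MiB
#     cache.vg_space_used = cache.size + md_size = 496 + 8 = 504 MiB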
def test_lvm_logical_volume_metadata_size(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                fmt=blivet.formats.get_format("xfs"),
                                exists=False, seg_type="raid1", pvs=[pv, pv2])
    self.assertEqual(lv.metadata_size, Size("4 MiB"))
    # two copies of metadata
    self.assertEqual(lv.metadata_vg_space_used, Size("8 MiB"))
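# The metadata math asserted above: a two-image raid1 LV keeps one metadata
# area per image, so the VG-level space used for metadata is
# metadata_size * 2 = 4 MiB * 2 = 8 MiB (hence the "two copies" comment).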
def test_lvm_logical_volume_insuf_seg_type(self):
    # pylint: disable=unused-variable
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    # pvs have to be specified for non-linear LVs
    with self.assertRaises(ValueError):
        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False, seg_type="raid1")
    with self.assertRaises(ValueError):
        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False, seg_type="striped")

    # either no PV specification or a complete one has to be given for linear LVs
    with self.assertRaises(ValueError):
        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False, pvs=[pv])
    with self.assertRaises(ValueError):
        pv_spec = LVPVSpec(pv, Size("256 MiB"))
        pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False, pvs=[pv_spec, pv_spec2])
def test_lvm_logical_volume_pv_free_linear(self):
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                        size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

    pv_spec = LVPVSpec(pv, Size("256 MiB"))
    pv_spec2 = LVPVSpec(pv2, Size("256 MiB"))
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                                fmt=blivet.formats.get_format("xfs"),
                                exists=False, pvs=[pv_spec, pv_spec2])

    self.assertEqual(lv.seg_type, "linear")
    self.assertEqual(pv.format.free, Size("768 MiB"))
    self.assertEqual(pv2.format.free, Size("256 MiB"))
def test_new_vdo_pool(self):
    b = blivet.Blivet()
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("10 GiB"), exists=True)
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)

    for dev in (pv, vg):
        b.devicetree._add_device(dev)

    # check that all the above devices are in the expected places
    self.assertEqual(set(b.devices), {pv, vg})
    self.assertEqual(set(b.vgs), {vg})

    self.assertEqual(vg.size, Size("10236 MiB"))

    with self.assertRaises(ValueError):
        vdopool = b.new_lv(name="vdopool", vdo_pool=True,
                           parents=[vg], compression=True,
                           deduplication=True,
                           size=blivet.size.Size("1 GiB"))

    vdopool = b.new_lv(name="vdopool", vdo_pool=True,
                       parents=[vg], compression=True,
                       deduplication=True,
                       size=blivet.size.Size("8 GiB"))

    vdolv = b.new_lv(name="vdolv", vdo_lv=True,
                     parents=[vdopool],
                     size=blivet.size.Size("40 GiB"))

    b.create_device(vdopool)
    b.create_device(vdolv)

    self.assertEqual(vdopool.children[0], vdolv)
    self.assertEqual(vdolv.parents[0], vdopool)
    self.assertListEqual(vg.lvs, [vdopool, vdolv])
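# Note on the sizes above: the VDO LV (40 GiB) is intentionally larger than
# its 8 GiB backing pool -- a VDO volume advertises a logical size that
# relies on compression and deduplication, so overprovisioning is expected.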
def test_vgchunk(self):
    pv = StorageDevice("pv1", size=Size("40 GiB"),
                       fmt=get_format("lvmpv"))
    vg = LVMVolumeGroupDevice("vg", parents=[pv])
    lv1 = LVMLogicalVolumeDevice("lv1", parents=[vg], size=Size("1 GiB"),
                                 grow=True)
    lv2 = LVMLogicalVolumeDevice("lv2", parents=[vg], size=Size("10 GiB"),
                                 grow=True)
    lv3 = LVMLogicalVolumeDevice("lv3", parents=[vg], size=Size("10 GiB"),
                                 grow=True, maxsize=Size("12 GiB"))

    req1 = LVRequest(lv1)
    req2 = LVRequest(lv2)
    req3 = LVRequest(lv3)
    chunk = VGChunk(vg, requests=[req1, req2, req3])

    self.assertEqual(chunk.length, vg.extents)
    self.assertEqual(chunk.pool, vg.free_extents)
    base_size = vg.align(sum((lv.size for lv in vg.lvs), Size(0)), roundup=True)
    base = base_size / vg.pe_size
    self.assertEqual(chunk.base, base)

    # default extent size is 4 MiB
    self.assertEqual(chunk.length_to_size(4), Size("16 MiB"))
    self.assertEqual(chunk.size_to_length(Size("33 MiB")), 8)
    self.assertEqual(chunk.has_growable, True)

    self.assertEqual(chunk.remaining, 3)
    self.assertEqual(chunk.done, False)

    chunk.grow_requests()

    # the chunk is done growing since its pool has been exhausted
    self.assertEqual(chunk.done, True)

    # there are still two requests remaining since lv1 and lv2 have no max
    self.assertEqual(chunk.remaining, 2)

    #
    # validate the resulting growth
    #
    # lv1 has size 1 GiB (256 extents) and is growable with no limit
    # lv2 has size 10 GiB (2560 extents) and is growable with no limit
    # lv3 has size 10 GiB (2560 extents) and is growable with a max size of
    # 12 GiB (max growth of 512 extents)
    #
    # The vg initially has 4863 free extents.
    # The growth ratio should be 1:10:10.
    #
    # The first pass through should allocate 231 extents to lv1 and 2315
    # extents to each of lv2 and lv3, leaving two remaining extents, but
    # it should reclaim 1803 extents from lv3 since it has a maximum growth
    # of 512 extents (2 GiB).
    #
    # The second pass should then split up the remaining 1805 extents
    # between lv1 and lv2 at a ratio of 1:10, which ends up being 164 for
    # lv1 and 1640 for lv2. The remaining extent goes to lv2 because it is
    # first in the list after sorting with blivet.partitioning.lv_compare.
    #
    # Grand totals should be as follows:
    # lv1 should grow by 395 extents, or 1.54 GiB
    # lv2 should grow by 3956 extents, or 15.45 GiB
    # lv3 should grow by 512 extents, or 2 GiB
    self.assertEqual(req1.growth, 395)
    self.assertEqual(req2.growth, 3956)
    self.assertEqual(req3.growth, 512)
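# A standalone sanity check of the two-pass arithmetic documented above,
# assuming floor division per pass (pure integer math, independent of blivet):
# pass 1: 4863 free extents split in the ratio 256:2560:2560
assert (4863 * 256) // 5376 == 231
assert (4863 * 2560) // 5376 == 2315
# lv3 is capped at 512 extents of growth: 2315 - 512 = 1803 reclaimed,
# plus the 2 extents left over from pass 1 -> 1805 for pass 2
assert 2315 - 512 + (4863 - 231 - 2315 - 2315) == 1805
# pass 2: 1805 extents split 256:2560, with the remainder (1) going to lv2
assert (1805 * 256) // 2816 == 164
assert (1805 * 2560) // 2816 == 1640
assert 231 + 164 == 395 and 2315 + 1640 + 1 == 3956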