Code Example #1
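These snippets are taken from blivet's test suite and example scripts and are shown without their module-level imports. The block below is a minimal sketch of what they appear to assume; the paths follow blivet's package layout, so treat it as an assumption rather than a copy of the original files' headers.

import blivet
import blivet.formats
# Size and the LVM device/request classes used throughout the snippets
from blivet.size import Size
from blivet.formats import get_format
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.devices.lvm import LVMCacheRequest, LVPVSpec
# VGChunk and LVRequest are only needed for Code Example #2
from blivet.partitioning import VGChunk, LVRequest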
    def test_lvmcached_two_logical_volume_init(self):
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("1 GiB"))
        pv2 = StorageDevice("pv2",
                            fmt=blivet.formats.get_format("lvmpv"),
                            size=Size("512 MiB"))
        vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

        cache_req = LVMCacheRequest(Size("256 MiB"), [pv2], "writethrough")
        lv1 = LVMLogicalVolumeDevice("testlv",
                                     parents=[vg],
                                     fmt=blivet.formats.get_format("xfs"),
                                     exists=False,
                                     cache_request=cache_req)

        cache_req = LVMCacheRequest(Size("256 MiB"), [pv2], "writethrough")
        lv2 = LVMLogicalVolumeDevice("testlv",
                                     parents=[vg],
                                     fmt=blivet.formats.get_format("xfs"),
                                     exists=False,
                                     cache_request=cache_req)

        cache = lv1.cache
        self.assertIsNotNone(cache)
        # 256 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
        self.assertEqual(cache.size, Size("240 MiB"))

        cache = lv2.cache
        self.assertIsNotNone(cache)
        # already have pmspare space reserved for lv1's cache (and shared)
        # 256 MiB - 8 MiB (metadata) [no pmspare]
        self.assertEqual(cache.size, Size("248 MiB"))
Code Example #2
File: partitioning_test.py  Project: vathpela/blivet
    def test_vgchunk_with_cache_pvfree(self):
        pv = StorageDevice("pv1", size=Size("40 GiB"), fmt=get_format("lvmpv"))
        # 1069 MiB so that the PV provides 1068 MiB of free space (see
        # LVMVolumeGroupDevice.extents) which is 44 MiB more than the caches
        # need and which should thus be split into the LVs
        pv2 = StorageDevice("pv2",
                            size=Size("1069 MiB"),
                            fmt=get_format("lvmpv"))
        vg = LVMVolumeGroupDevice("vg", parents=[pv, pv2])

        cache_req1 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
        lv1 = LVMLogicalVolumeDevice("lv1",
                                     parents=[vg],
                                     size=Size("1 GiB"),
                                     grow=True,
                                     cache_request=cache_req1)

        cache_req2 = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
        lv2 = LVMLogicalVolumeDevice("lv2",
                                     parents=[vg],
                                     size=Size("10 GiB"),
                                     grow=True,
                                     cache_request=cache_req2)

        lv3 = LVMLogicalVolumeDevice("lv3",
                                     parents=[vg],
                                     size=Size("10 GiB"),
                                     grow=True,
                                     maxsize=Size("12 GiB"))

        req1 = LVRequest(lv1)
        req2 = LVRequest(lv2)
        req3 = LVRequest(lv3)
        chunk = VGChunk(vg, requests=[req1, req2, req3])

        chunk.grow_requests()

        # the chunk is done growing since its pool has been exhausted
        self.assertEqual(chunk.done, True)

        # there are still two requests remaining since lv1 and lv2 have no max
        self.assertEqual(chunk.remaining, 2)

        # All the sizes should be the same as without the caches (see the
        # test_vgchunk test for their "rationales") because the space for the
        # caches should just be reserved.
        # The extra 11 extents available on the pv2 should go in the 1:10 ratio
        # to req1 and req2.
        self.assertEqual(req1.growth, 395 + 1)
        self.assertEqual(req2.growth, 3956 + 10)
        self.assertEqual(req3.growth, 512)
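In numbers: the 1069 MiB PV provides 1068 MiB of usable extents, the two 512 MiB cache requests reserve 1024 MiB of that, and the 44 MiB left over corresponds to 11 extents at the default 4 MiB extent size. Those 11 extents are distributed in the 1:10 ratio of the base sizes (1 GiB vs. 10 GiB), which is why req1 and req2 grow by one and ten extents more than they would without caches (395 and 3956 extents, per the referenced test_vgchunk test), while req3 is capped by its 12 GiB maxsize at 512 extents of growth.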
Code Example #3
    def setUp(self):
        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("1 GiB"))
        vg = LVMVolumeGroupDevice("testvg", parents=[pv])
        self.lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
                                         fmt=blivet.formats.get_format("xfs"))

        pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
                            size=Size("1 GiB"))
        pv3 = StorageDevice("pv3", fmt=blivet.formats.get_format("lvmpv"),
                            size=Size("1 GiB"))
        vg2 = LVMVolumeGroupDevice("testvg2", parents=[pv2, pv3])
        cache_req = LVMCacheRequest(Size("512 MiB"), [pv3], "writethrough")
        self.cached_lv = LVMLogicalVolumeDevice("testcachedlv", parents=[vg2],
                                                fmt=blivet.formats.get_format("xfs"),
                                                exists=False, cache_request=cache_req)
Code Example #4
    def test_lvmcached_logical_volume_init(self):
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("1 GiB"))
        pv2 = StorageDevice("pv2",
                            fmt=blivet.formats.get_format("lvmpv"),
                            size=Size("512 MiB"))
        vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

        cache_req = LVMCacheRequest(Size("512 MiB"), [pv2], "writethrough")
        xfs_fmt = blivet.formats.get_format("xfs")
        lv = LVMLogicalVolumeDevice("testlv",
                                    parents=[vg],
                                    fmt=xfs_fmt,
                                    size=Size(xfs_fmt.min_size),
                                    exists=False,
                                    cache_request=cache_req)
        self.assertEqual(lv.size, xfs_fmt.min_size)

        # check that the LV behaves like a cached LV
        self.assertTrue(lv.cached)
        cache = lv.cache
        self.assertIsNotNone(cache)

        # the cache reserves space for its metadata from the requested size, but
        # it may require (and does in this case) a pmspare LV to be allocated
        self.assertEqual(lv.vg_space_used,
                         lv.cache.size + lv.cache.md_size + lv.size)

        # check parameters reported by the (non-existing) cache
        # 512 MiB - 8 MiB (metadata) - 8 MiB (pmspare)
        self.assertEqual(cache.size, Size("496 MiB"))
        self.assertEqual(cache.md_size, Size("8 MiB"))
        self.assertEqual(cache.vg_space_used, Size("504 MiB"))
        self.assertIsInstance(cache.size, Size)
        self.assertIsInstance(cache.md_size, Size)
        self.assertIsInstance(cache.vg_space_used, Size)
        self.assertFalse(cache.exists)
        self.assertIsNone(cache.stats)
        self.assertEqual(cache.mode, "writethrough")
        self.assertIsNone(cache.backing_device_name)
        self.assertIsNone(cache.cache_device_name)
        self.assertEqual(set(cache.fast_pvs), set([pv2]))
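In other words, for a 512 MiB cache request the usable cache data area comes out to 496 MiB (512 MiB minus 8 MiB of metadata and 8 MiB of pmspare), the metadata LV is 8 MiB, and the cache itself accounts for 504 MiB of VG space (data plus metadata); the cached LV's vg_space_used adds the LV's own size on top, as the assertion above checks.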
Code Example #5
    def test_lvm_logical_volume_pv_free_cached(self):
        pv = StorageDevice("pv1",
                           fmt=blivet.formats.get_format("lvmpv"),
                           size=Size("1025 MiB"))
        pv2 = StorageDevice("pv2",
                            fmt=blivet.formats.get_format("lvmpv"),
                            size=Size("513 MiB"))
        vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
        pv_spec = LVPVSpec(pv, Size("256 MiB"))
        pv_spec2 = LVPVSpec(pv2, Size("256 MiB"))
        cache_req = LVMCacheRequest(Size("512 MiB"), [pv], "writethrough")
        lv = LVMLogicalVolumeDevice("testlv",
                                    parents=[vg],
                                    size=Size("512 MiB"),
                                    fmt=blivet.formats.get_format("xfs"),
                                    exists=False,
                                    cache_request=cache_req,
                                    pvs=[pv_spec, pv_spec2])
        self.assertEqual(lv.seg_type, "linear")
        # 1024 MiB (free) - 256 MiB (LV part) - 504 MiB (cache shrank for pmspare space)
        self.assertEqual(pv.format.free, Size("264 MiB"))
        self.assertEqual(pv2.format.free, Size("256 MiB"))
Code Example #6
File: lvm_cache.py  Project: atodorov/blivet
                   grow=True,
                   parents=[vg],
                   name="unbounded")
    b.create_device(dev)

    # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
    dev = b.new_lv(fmt_type="ext4",
                   size=Size("5GiB"),
                   grow=True,
                   maxsize=Size("15GiB"),
                   parents=[vg],
                   name="bounded")
    b.create_device(dev)

    # new lv with a fixed size of 2GiB, an ext4 filesystem and a 1GiB cache on pv2
    cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
    dev = b.new_lv(fmt_type="ext4",
                   size=Size("2GiB"),
                   parents=[vg],
                   name="cached",
                   cache_request=cache_spec)
    b.create_device(dev)

    # allocate the growable lvs
    blivet.partitioning.grow_lvm(b)
    print(b.devicetree)

    # write the new partitions to disk and format them as specified
    b.do_it()
    print(b.devicetree)
    input("Check the state and hit ENTER to trigger cleanup")
Code Example #7
    def _execute_logvol_data(self, storage, data, logvol_data):
        """Execute the logvol data.

        :param storage: an instance of the Blivet's storage object
        :param data: an instance of kickstart data
        :param logvol_data: an instance of LogVolData
        """
        devicetree = storage.devicetree

        # FIXME: we should be running sanityCheck on partitioning that is not ks
        # autopart, but that's likely too invasive for #873135 at this moment
        if logvol_data.mountpoint == "/boot" and blivet.arch.is_s390():
            raise StorageError(
                _("/boot cannot be of type \"lvmlv\" on s390x")
            )

        # we might have truncated or otherwise changed the specified vg name
        vgname = data.onPart.get(logvol_data.vgname, logvol_data.vgname)

        size = None

        if logvol_data.percent:
            size = Size(0)

        if logvol_data.mountpoint == "swap":
            ty = "swap"
            logvol_data.mountpoint = ""
            if logvol_data.recommended or logvol_data.hibernation:
                disk_space = self._disk_free_space
                size = suggest_swap_size(
                    hibernation=logvol_data.hibernation,
                    disk_space=disk_space
                )
                logvol_data.grow = False
        else:
            if logvol_data.fstype != "":
                ty = logvol_data.fstype
            else:
                ty = storage.default_fstype

        if size is None and not logvol_data.preexist:
            if not logvol_data.size:
                raise StorageError(
                    _("Size cannot be decided on from kickstart nor obtained from device.")
                )

            size = self._get_size(logvol_data.size, "MiB")

        if logvol_data.thin_pool:
            logvol_data.mountpoint = ""
            ty = None

        # Sanity check mountpoint
        self._check_mount_point(logvol_data.mountpoint)

        # Check that the VG this LV is a member of has already been specified.
        vg = devicetree.get_device_by_name(vgname)
        if not vg:
            raise StorageError(
                _("No volume group exists with the name \"{}\". Specify volume "
                  "groups before logical volumes.").format(logvol_data.vgname)
            )

        # If cache PVs specified, check that they belong to the same VG this LV is a member of
        if logvol_data.cache_pvs:
            pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
            if not all(pv in vg.pvs for pv in pv_devices):
                raise StorageError(
                    _("Cache PVs must belong to the same VG as the cached LV")
                )

        pool = None
        if logvol_data.thin_volume:
            pool = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.pool_name))
            if not pool:
                raise StorageError(
                    _("No thin pool exists with the name \"{}\". Specify thin pools "
                      "before thin volumes.").format(logvol_data.pool_name)
                )

        # If this specifies an existing request that we should not format,
        # quit here after setting up enough information to mount it later.
        if not logvol_data.format:
            if not logvol_data.name:
                raise StorageError(
                    _("logvol --noformat must also use the --name= option.")
                )

            dev = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if not dev:
                raise StorageError(
                    _("Logical volume \"{}\" given in logvol command does "
                      "not exist.").format(logvol_data.name)
                )

            if logvol_data.resize:
                size = dev.raw_device.align_target_size(size)
                if size < dev.current_size:
                    # shrink
                    try:
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, logvol_data.size, dev.name)
                else:
                    # grow
                    try:
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, logvol_data.size, dev.name)

            dev.format.mountpoint = logvol_data.mountpoint
            dev.format.mountopts = logvol_data.fsopts
            if ty == "swap":
                storage.add_fstab_swap(dev)
            return

        # Make sure this LV name is not already used in the requested VG.
        if not logvol_data.preexist:
            tmp = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if tmp:
                raise StorageError(
                    _("Logical volume name \"{}\" is already in use in volume group "
                      "\"{}\".").format(logvol_data.name, vg.name)
                )

            if not logvol_data.percent and size and not logvol_data.grow and size < vg.pe_size:
                raise StorageError(
                    _("Logical volume size \"{}\" must be larger than the volume "
                      "group extent size of \"{}\".").format(size, vg.pe_size)
                )

        # Now get a format to hold a lot of these extra values.
        fmt = get_format(
            ty,
            mountpoint=logvol_data.mountpoint,
            label=logvol_data.label,
            fsprofile=logvol_data.fsprofile,
            create_options=logvol_data.mkfsopts,
            mountopts=logvol_data.fsopts
        )
        if not fmt.type and not logvol_data.thin_pool:
            raise StorageError(
                _("The \"{}\" file system type is not supported.").format(ty)
            )

        add_fstab_swap = None
        # If we were given a pre-existing LV to create a filesystem on, we need
        # to verify it and its VG exists and then schedule a new format action
        # to take place there.  Also, we only support a subset of all the
        # options on pre-existing LVs.
        if logvol_data.preexist:
            device = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if not device:
                raise StorageError(
                    _("Logical volume \"{}\" given in logvol command does "
                      "not exist.").format(logvol_data.name)
                )

            storage.devicetree.recursive_remove(device, remove_device=False)

            if logvol_data.resize:
                size = device.raw_device.align_target_size(size)
                try:
                    devicetree.actions.add(ActionResizeDevice(device, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, logvol_data.size, device.name)

            devicetree.actions.add(ActionCreateFormat(device, fmt))
            if ty == "swap":
                add_fstab_swap = device
        else:
            # If a previous device has claimed this mount point, delete the
            # old one.
            try:
                if logvol_data.mountpoint:
                    device = storage.mountpoints[logvol_data.mountpoint]
                    storage.destroy_device(device)
            except KeyError:
                pass

            if logvol_data.thin_volume:
                parents = [pool]
            else:
                parents = [vg]

            pool_args = {}
            if logvol_data.thin_pool:
                if logvol_data.profile:
                    matching = (p for p in KNOWN_THPOOL_PROFILES if p.name == logvol_data.profile)
                    profile = next(matching, None)
                    if profile:
                        pool_args["profile"] = profile
                    else:
                        log.warning(
                            "No matching profile for %s found in LVM configuration",
                            logvol_data.profile
                        )
                if logvol_data.metadata_size:
                    pool_args["metadata_size"] = Size("%d MiB" % logvol_data.metadata_size)
                if logvol_data.chunk_size:
                    pool_args["chunk_size"] = Size("%d KiB" % logvol_data.chunk_size)

            if logvol_data.maxSizeMB:
                maxsize = self._get_size(logvol_data.maxSizeMB, "MiB")
            else:
                maxsize = None

            if logvol_data.cache_size and logvol_data.cache_pvs:
                pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
                cache_size = Size("%d MiB" % logvol_data.cache_size)
                cache_mode = logvol_data.cache_mode or None
                cache_request = LVMCacheRequest(cache_size, pv_devices, cache_mode)
            else:
                cache_request = None

            request = storage.new_lv(
                fmt=fmt,
                name=logvol_data.name,
                parents=parents,
                size=size,
                thin_pool=logvol_data.thin_pool,
                thin_volume=logvol_data.thin_volume,
                grow=logvol_data.grow,
                maxsize=maxsize,
                percent=logvol_data.percent,
                cache_request=cache_request,
                **pool_args
            )

            storage.create_device(request)
            if ty == "swap":
                add_fstab_swap = request

        if logvol_data.encrypted:
            passphrase = self._get_passphrase(logvol_data)
            cert = storage.get_escrow_certificate(logvol_data.escrowcert)

            # Get the version of LUKS and PBKDF arguments.
            logvol_data.luks_version = logvol_data.luks_version or storage.default_luks_version

            pbkdf_args = get_pbkdf_args(
                luks_version=logvol_data.luks_version,
                pbkdf_type=logvol_data.pbkdf,
                max_memory_kb=logvol_data.pbkdf_memory,
                iterations=logvol_data.pbkdf_iterations,
                time_ms=logvol_data.pbkdf_time
            )

            if pbkdf_args and not luks_data.pbkdf_args:
                luks_data.pbkdf_args = pbkdf_args

            if logvol_data.preexist:
                luksformat = fmt
                device.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    device=device.path,
                    cipher=logvol_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=logvol_data.backuppassphrase,
                    luks_version=logvol_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=device
                )
            else:
                luksformat = request.format
                request.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    cipher=logvol_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=logvol_data.backuppassphrase,
                    luks_version=logvol_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=request
                )

            if ty == "swap":
                # swap is on the LUKS device not on the LUKS' parent device,
                # override the info here
                add_fstab_swap = luksdev

            storage.create_device(luksdev)

        if add_fstab_swap:
            storage.add_fstab_swap(add_fstab_swap)
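For reference, the cache-specific branch above (cache_size, cache_pvs, cache_mode) is driven by the corresponding options of the kickstart logvol command. An illustrative fragment, with option names as defined by pykickstart and sizes chosen arbitrarily, could look like this:

volgroup vg pv.1 pv.2
logvol / --vgname=vg --name=root --size=8192 --grow --cachesize=512 --cachepvs=pv.2 --cachemode=writethrough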