Example #1
0
    def test_align_free_regions(self):
        # disk with two free regions -- first unaligned, second aligned
        disk = Mock()
        disk.format.alignment.grainSize = 2048
        disk.format.parted_disk.getFreeSpaceRegions.return_value = [Mock(start=1, end=2049, length=2049),
                                                                    Mock(start=1, end=2048, length=2048)]

        free = get_free_regions([disk])
        self.assertEqual(free[0].length, 2049)
        self.assertEqual(free[1].length, 2048)

        free = get_free_regions([disk], align=True)
        self.assertEqual(free[0].length, 2048)
        self.assertEqual(free[1].length, 2048)
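
The assertions above only pin down the observable rule: with a grain size of 2048 sectors, the unaligned 2049-sector region is trimmed to 2048 when align=True, while the already-aligned 2048-sector region keeps its length. Below is a minimal sketch of that truncation, assuming get_free_regions simply rounds each region's length down to a whole number of grains; the helper name and plain-integer interface are illustrative, not blivet's API.

def align_region_length(length, grain_size):
    # Round the region length down to a whole number of grains, matching the
    # expectations above (2049 -> 2048, 2048 -> 2048).
    return length - (length % grain_size)


assert align_region_length(2049, 2048) == 2048  # unaligned region is trimmed
assert align_region_length(2048, 2048) == 2048  # aligned region is unchanged
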
Example #2
0
    def test_align_free_regions(self):
        # disk with two free regions -- first unaligned, second aligned
        disk = Mock()
        disk.format.alignment.grainSize = 2048
        disk.format.parted_disk.getFreeSpaceRegions.return_value = [Mock(start=1, end=2049, length=2049),
                                                                    Mock(start=1, end=2048, length=2048)]

        free = get_free_regions([disk])
        self.assertEqual(free[0].length, 2049)
        self.assertEqual(free[1].length, 2048)

        free = get_free_regions([disk], align=True)
        self.assertEqual(free[0].length, 2048)
        self.assertEqual(free[1].length, 2048)
Example #3
0
    def test_msdos_disk_chunk1(self):
        disk_size = Size("100 MiB")
        with sparsetmpfile("chunktest", disk_size) as disk_file:
            disk = DiskFile(disk_file)
            disk.format = get_format("disklabel",
                                     device=disk.path,
                                     exists=False,
                                     label_type="msdos")

            p1 = PartitionDevice("p1", size=Size("10 MiB"), grow=True)
            p2 = PartitionDevice("p2", size=Size("30 MiB"), grow=True)

            disks = [disk]
            partitions = [p1, p2]
            free = get_free_regions([disk])
            self.assertEqual(len(free), 1,
                             "free region count %d not expected" % len(free))

            b = Mock(spec=Blivet)
            allocate_partitions(b, disks, partitions, free)

            requests = [PartitionRequest(p) for p in partitions]
            chunk = DiskChunk(free[0], requests=requests)

            # parted reports a first free sector of 32 for msdos on disk files. whatever.
            # XXX on gpt, the start is increased to 34 and the end is reduced from 204799 to 204766,
            #     yielding an expected length of 204733
            length_expected = 204768
            self.assertEqual(chunk.length, length_expected)

            base_expected = sum(p.parted_partition.geometry.length
                                for p in partitions)
            self.assertEqual(chunk.base, base_expected)

            pool_expected = chunk.length - base_expected
            self.assertEqual(chunk.pool, pool_expected)

            self.assertEqual(chunk.done, False)
            self.assertEqual(chunk.remaining, 2)

            chunk.grow_requests()

            self.assertEqual(chunk.done, True)
            self.assertEqual(chunk.pool, 0)
            self.assertEqual(chunk.remaining, 2)

            #
            # validate the growth (everything in sectors)
            #
            # The chunk length is 204768. The base of p1 is 20480. The base of
            # p2 is 61440. The chunk has a base of 81920 and a pool of 122848.
            #
            # p1 should grow by 30712 while p2 grows by 92136 since p2's base
            # size is exactly three times that of p1.
            self.assertEqual(requests[0].growth, 30712)
            self.assertEqual(requests[1].growth, 92136)
Example #4
0
    def test_msdos_disk_chunk1(self):
        disk_size = Size("100 MiB")
        with sparsetmpfile("chunktest", disk_size) as disk_file:
            disk = DiskFile(disk_file)
            disk.format = get_format("disklabel", device=disk.path, exists=False, label_type="msdos")

            p1 = PartitionDevice("p1", size=Size("10 MiB"), grow=True)
            p2 = PartitionDevice("p2", size=Size("30 MiB"), grow=True)

            disks = [disk]
            partitions = [p1, p2]
            free = get_free_regions([disk])
            self.assertEqual(len(free), 1,
                             "free region count %d not expected" % len(free))

            b = Mock(spec=Blivet)
            allocate_partitions(b, disks, partitions, free)

            requests = [PartitionRequest(p) for p in partitions]
            chunk = DiskChunk(free[0], requests=requests)

            # parted reports a first free sector of 32 for msdos on disk files. whatever.
            # XXX on gpt, the start is increased to 34 and the end is reduced from 204799 to 204766,
            #     yielding an expected length of 204733
            length_expected = 204768
            self.assertEqual(chunk.length, length_expected)

            base_expected = sum(p.parted_partition.geometry.length for p in partitions)
            self.assertEqual(chunk.base, base_expected)

            pool_expected = chunk.length - base_expected
            self.assertEqual(chunk.pool, pool_expected)

            self.assertEqual(chunk.done, False)
            self.assertEqual(chunk.remaining, 2)

            chunk.grow_requests()

            self.assertEqual(chunk.done, True)
            self.assertEqual(chunk.pool, 0)
            self.assertEqual(chunk.remaining, 2)

            #
            # validate the growth (everything in sectors)
            #
            # The chunk length is 204768. The base of p1 is 20480. The base of
            # p2 is 61440. The chunk has a base of 81920 and a pool of 122848.
            #
            # p1 should grow by 30712 while p2 grows by 92136 since p2's base
            # size is exactly three times that of p1.
            self.assertEqual(requests[0].growth, 30712)
            self.assertEqual(requests[1].growth, 92136)
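
The closing comment in the two listings above states the expected numbers; the sketch below just re-derives them under the proportional model the comment describes (the pool is shared in proportion to each request's base size). The variable names are illustrative and this is not the DiskChunk implementation.

# Sector counts taken from the comment above.
chunk_length = 204768
bases = {"p1": 20480, "p2": 61440}

base_total = sum(bases.values())                 # 81920
pool = chunk_length - base_total                 # 122848

# Share the pool in proportion to each request's base size (1:3 here).
growth = {name: pool * base // base_total for name, base in bases.items()}

assert growth == {"p1": 30712, "p2": 92136}
assert sum(growth.values()) == pool              # pool fully consumed
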
Example #5
0
def _schedule_partitions(storage, disks, implicit_devices, requests=None):
    """ Schedule creation of autopart/reqpart partitions.

        This only schedules the requests for actual partitions.

        :param storage: a :class:`pyanaconda.storage.InstallerStorage` instance
        :type storage: :class:`pyanaconda.storage.InstallerStorage`
        :param disks: list of partitioned disks with free space
        :type disks: list of :class:`blivet.devices.StorageDevice`
        :param implicit_devices: list of implicitly scheduled partitions whose
                                 sizes may be reduced to make room for the requests
        :type implicit_devices: list of :class:`blivet.devices.StorageDevice`
        :param requests: list of partitioning requests to operate on,
                         or `~.storage.InstallerStorage.autopart_requests` by default
        :type requests: list of :class:`~.storage.partspec.PartSpec` instances
        :returns: the (possibly resized) list of implicit device requests
        :rtype: list of :class:`blivet.devices.StorageDevice`
    """
    if not requests:
        requests = storage.autopart_requests

    # basis for requests with required_space is the sum of the sizes of the
    # two largest free regions
    all_free = (Size(reg.getLength(unit="B"))
                for reg in get_free_regions(disks))
    all_free = sorted(all_free, reverse=True)
    if not all_free:
        # this should never happen since we've already filtered the disks
        # to those with at least 500MiB free
        log.error("no free space on disks %s", [d.name for d in disks])
        return

    free = all_free[0]
    if len(all_free) > 1:
        free += all_free[1]

    # The boot disk must be set at this point. See if any platform-specific
    # stage1 device we might allocate already exists on the boot disk.
    stage1_device = None
    for device in storage.devices:
        if storage.bootloader.stage1_disk not in device.disks:
            continue

        if storage.bootloader.is_valid_stage1_device(device, early=True):
            stage1_device = device
            break

    #
    # First pass is for partitions only. We'll do LVs later.
    #
    for request in requests:
        if ((request.lv and storage.do_autopart and storage.autopart_type
             in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP)) or
            (request.btr and storage.autopart_type == AUTOPART_TYPE_BTRFS)):
            continue

        if request.required_space and request.required_space > free:
            continue

        elif request.fstype in ("prepboot", "efi", "macefi", "hfs+") and \
                (storage.bootloader.skip_bootloader or stage1_device):
            # there should never be a need for more than one of these
            # partitions, so skip them.
            log.info("skipping unneeded stage1 %s request", request.fstype)
            log.debug("%s", request)

            if request.fstype in ["efi", "macefi"] and stage1_device:
                # Set the mountpoint for the existing EFI boot partition
                stage1_device.format.mountpoint = "/boot/efi"

            log.debug("%s", stage1_device)
            continue
        elif request.fstype == "biosboot":
            is_gpt = (stage1_device and getattr(stage1_device.format,
                                                "label_type", None) == "gpt")
            has_bios_boot = (stage1_device and any([
                p.format.type == "biosboot"
                for p in storage.partitions if p.disk == stage1_device
            ]))
            if (storage.bootloader.skip_bootloader
                    or not (stage1_device and stage1_device.is_disk and is_gpt
                            and not has_bios_boot)):
                # there should never be a need for more than one of these
                # partitions, so skip them.
                log.info("skipping unneeded stage1 %s request", request.fstype)
                log.debug("%s", request)
                log.debug("%s", stage1_device)
                continue

        if request.size > all_free[0]:
            # no big enough free space for the requested partition
            raise NotEnoughFreeSpaceError(
                _("No big enough free space on disks for "
                  "automatic partitioning"))

        if request.encrypted and storage.encrypted_autopart:
            fmt_type = "luks"
            fmt_args = {
                "passphrase": luks_data.encryption_passphrase,
                "cipher": storage.encryption_cipher,
                "escrow_cert": storage.autopart_escrow_cert,
                "add_backup_passphrase":
                storage.autopart_add_backup_passphrase,
                "min_luks_entropy": luks_data.min_entropy,
                "luks_version": storage.autopart_luks_version,
                "pbkdf_args": storage.autopart_pbkdf_args
            }
        else:
            fmt_type = request.fstype
            fmt_args = {}

        dev = storage.new_partition(fmt_type=fmt_type,
                                    fmt_args=fmt_args,
                                    size=request.size,
                                    grow=request.grow,
                                    maxsize=request.max_size,
                                    mountpoint=request.mountpoint,
                                    parents=disks)

        # schedule the device for creation
        storage.create_device(dev)

        if request.encrypted and storage.encrypted_autopart:
            luks_fmt = get_format(request.fstype,
                                  device=dev.path,
                                  mountpoint=request.mountpoint)
            luks_dev = LUKSDevice("luks-%s" % dev.name,
                                  fmt=luks_fmt,
                                  size=dev.size,
                                  parents=dev)
            storage.create_device(luks_dev)

        if storage.do_autopart and \
           storage.autopart_type in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP,
                                     AUTOPART_TYPE_BTRFS):
            # doing LVM/BTRFS -- make sure the newly created partition fits in some
            # free space together with one of the implicitly requested partitions
            smallest_implicit = sorted(implicit_devices,
                                       key=lambda d: d.size)[0]
            if (request.size + smallest_implicit.size) > all_free[0]:
                # not enough space to allocate the smallest implicit partition
                # and the request; make the implicit partitions smaller in an
                # attempt to make space for the request
                for implicit_req in implicit_devices:
                    implicit_req.size = FALLBACK_DEFAULT_PART_SIZE

    return implicit_devices
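
One detail of the function above that is easy to miss: the free-space figure used for the required_space checks is not the total free space but the sum of the two largest free regions. Here is a small sketch of just that computation, with made-up byte counts standing in for the parted region lengths read via reg.getLength(unit="B").

# Illustrative region sizes in bytes (not taken from any real disk).
region_sizes = [200 * 1024 ** 2, 500 * 1024 ** 2, 50 * 1024 ** 2]

all_free = sorted(region_sizes, reverse=True)

# Basis for the required_space checks: the two largest regions only.
free = all_free[0]
if len(all_free) > 1:
    free += all_free[1]

assert free == (500 + 200) * 1024 ** 2
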
Example #6
0
    def test_msdos_disk_chunk2(self):
        disk_size = Size("100 MiB")
        with sparsetmpfile("chunktest", disk_size) as disk_file:
            disk = DiskFile(disk_file)
            disk.format = get_format("disklabel", device=disk.path, exists=False, label_type="msdos")

            p1 = PartitionDevice("p1", size=Size("10 MiB"), grow=True)
            p2 = PartitionDevice("p2", size=Size("30 MiB"), grow=True)

            # format max size should be reflected in request max growth
            fmt = get_format("dummy")
            fmt._max_size = Size("12 MiB")
            p3 = PartitionDevice("p3", size=Size("10 MiB"), grow=True,
                                 fmt=fmt)

            p4 = PartitionDevice("p4", size=Size("7 MiB"))

            # partition max size should be reflected in request max growth
            p5 = PartitionDevice("p5", size=Size("5 MiB"), grow=True,
                                 maxsize=Size("6 MiB"))

            disks = [disk]
            partitions = [p1, p2, p3, p4, p5]
            free = get_free_regions([disk])
            self.assertEqual(len(free), 1,
                             "free region count %d not expected" % len(free))

            b = Mock(spec=Blivet)
            allocate_partitions(b, disks, partitions, free)

            requests = [PartitionRequest(p) for p in partitions]
            chunk = DiskChunk(free[0], requests=requests)

            self.assertEqual(len(chunk.requests), len(partitions))

            # parted reports a first free sector of 32 for disk files. whatever.
            length_expected = 204768
            self.assertEqual(chunk.length, length_expected)

            growable = [p for p in partitions if p.req_grow]
            fixed = [p for p in partitions if not p.req_grow]
            base_expected = sum(p.parted_partition.geometry.length for p in growable)
            self.assertEqual(chunk.base, base_expected)

            base_fixed = sum(p.parted_partition.geometry.length for p in fixed)
            pool_expected = chunk.length - base_expected - base_fixed
            self.assertEqual(chunk.pool, pool_expected)

            self.assertEqual(chunk.done, False)

            # since p4 is not growable it is initially done
            self.assertEqual(chunk.remaining, 4)

            chunk.grow_requests()

            #
            # validate the growth (in sectors)
            #
            # The chunk length is 204768.
            # Request bases:
            #   p1 20480
            #   p2 61440
            #   p3 20480
            #   p4 14336 (not included in chunk base since it isn't growable)
            #   p5 10240
            #
            # The chunk has a base of 112640 and a pool of 77792.
            #
            # Request max growth:
            #   p1 0
            #   p2 0
            #   p3 4096
            #   p4 0
            #   p5 2048
            #
            # The first round should allocate to p1, p2, p3, p5 at a ratio of
            # 2:6:2:1, which is 14144, 42432, 14144, 7072. Due to max growth,
            # p3 and p5 will be limited and the extra (10048, 5024) will remain
            # in the pool. In the second round the remaining requests will be
            # p1 and p2. They will divide up the pool of 15072 at a ratio of
            # 1:3, which is 3768 and 11304. At this point the pool should be
            # empty.
            #
            # Total growth:
            #   p1 17912
            #   p2 53736
            #   p3 4096
            #   p4 0
            #   p5 2048
            #
            self.assertEqual(chunk.done, True)
            self.assertEqual(chunk.pool, 0)
            self.assertEqual(chunk.remaining, 2)    # p1, p2 have no max

            # chunk.requests got sorted, so use the list whose order we know
            self.assertEqual(requests[0].growth, 17912)
            self.assertEqual(requests[1].growth, 53736)
            self.assertEqual(requests[2].growth, 4096)
            self.assertEqual(requests[3].growth, 0)
            self.assertEqual(requests[4].growth, 2048)
Example #7
0
    def test_disk_chunk2(self):
        disk_size = Size("100 MiB")
        with sparsetmpfile("chunktest", disk_size) as disk_file:
            disk = DiskFile(disk_file)
            disk.format = get_format("disklabel",
                                     device=disk.path,
                                     exists=False)

            p1 = PartitionDevice("p1", size=Size("10 MiB"), grow=True)
            p2 = PartitionDevice("p2", size=Size("30 MiB"), grow=True)

            # format max size should be reflected in request max growth
            fmt = get_format("dummy")
            fmt._max_size = Size("12 MiB")
            p3 = PartitionDevice("p3", size=Size("10 MiB"), grow=True, fmt=fmt)

            p4 = PartitionDevice("p4", size=Size("7 MiB"))

            # partition max size should be reflected in request max growth
            p5 = PartitionDevice("p5",
                                 size=Size("5 MiB"),
                                 grow=True,
                                 maxsize=Size("6 MiB"))

            disks = [disk]
            partitions = [p1, p2, p3, p4, p5]
            free = get_free_regions([disk])
            self.assertEqual(len(free), 1,
                             "free region count %d not expected" % len(free))

            b = Mock(spec=Blivet)
            allocate_partitions(b, disks, partitions, free)

            requests = [PartitionRequest(p) for p in partitions]
            chunk = DiskChunk(free[0], requests=requests)

            self.assertEqual(len(chunk.requests), len(partitions))

            # parted reports a first free sector of 32 for disk files. whatever.
            length_expected = 204768
            self.assertEqual(chunk.length, length_expected)

            growable = [p for p in partitions if p.req_grow]
            fixed = [p for p in partitions if not p.req_grow]
            base_expected = sum(p.parted_partition.geometry.length
                                for p in growable)
            self.assertEqual(chunk.base, base_expected)

            base_fixed = sum(p.parted_partition.geometry.length for p in fixed)
            pool_expected = chunk.length - base_expected - base_fixed
            self.assertEqual(chunk.pool, pool_expected)

            self.assertEqual(chunk.done, False)

            # since p4 is not growable it is initially done
            self.assertEqual(chunk.remaining, 4)

            chunk.grow_requests()

            #
            # validate the growth (in sectors)
            #
            # The chunk length is 204768.
            # Request bases:
            #   p1 20480
            #   p2 61440
            #   p3 20480
            #   p4 14336 (not included in chunk base since it isn't growable)
            #   p5 10240
            #
            # The chunk has a base of 112640 and a pool of 77792.
            #
            # Request max growth:
            #   p1 0
            #   p2 0
            #   p3 4096
            #   p4 0
            #   p5 2048
            #
            # The first round should allocate to p1, p2, p3, p5 at a ratio of
            # 2:6:2:1, which is 14144, 42432, 14144, 7072. Due to max growth,
            # p3 and p5 will be limited and the extra (10048, 5024) will remain
            # in the pool. In the second round the remaining requests will be
            # p1 and p2. They will divide up the pool of 15072 at a ratio of
            # 1:3, which is 3768 and 11304. At this point the pool should be
            # empty.
            #
            # Total growth:
            #   p1 17912
            #   p2 53736
            #   p3 4096
            #   p4 0
            #   p5 2048
            #
            self.assertEqual(chunk.done, True)
            self.assertEqual(chunk.pool, 0)
            self.assertEqual(chunk.remaining, 2)  # p1, p2 have no max

            # chunk.requests got sorted, so use the list whose order we know
            self.assertEqual(requests[0].growth, 17912)
            self.assertEqual(requests[1].growth, 53736)
            self.assertEqual(requests[2].growth, 4096)
            self.assertEqual(requests[3].growth, 0)
            self.assertEqual(requests[4].growth, 2048)
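
The long comment in the two listings above works through the growth by hand. The sketch below re-derives the same totals with a generic "distribute the pool proportionally, cap at each request's max growth, return the excess to the pool" loop; it is a simplification of what DiskChunk.grow_requests does (no alignment or rounding concerns), using only the numbers stated in the comment.

# Sector counts taken from the comment above; the comment uses a max growth
# of 0 to mean "unlimited", None is used here instead for clarity.
bases = {"p1": 20480, "p2": 61440, "p3": 20480, "p5": 10240}
max_growth = {"p1": None, "p2": None, "p3": 4096, "p5": 2048}
fixed = 14336                                    # p4, not growable
pool = 204768 - sum(bases.values()) - fixed      # 77792

growth = {name: 0 for name in bases}
active = set(bases)
while pool and active:
    base_total = sum(bases[name] for name in active)
    allocated = 0
    capped = set()
    for name in sorted(active):
        share = pool * bases[name] // base_total
        limit = max_growth[name]
        if limit is not None and growth[name] + share >= limit:
            share = limit - growth[name]         # hit the cap; excess stays in pool
            capped.add(name)
        growth[name] += share
        allocated += share
    if allocated == 0:                           # nothing left to hand out
        break
    pool -= allocated
    active -= capped

assert growth == {"p1": 17912, "p2": 53736, "p3": 4096, "p5": 2048}
assert pool == 0
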
Example #8
0
def _schedule_partitions(storage, disks, implicit_devices, requests=None):
    """ Schedule creation of autopart/reqpart partitions.

        This only schedules the requests for actual partitions.

        :param storage: a :class:`pyanaconda.storage.InstallerStorage` instance
        :type storage: :class:`pyanaconda.storage.InstallerStorage`
        :param disks: list of partitioned disks with free space
        :type disks: list of :class:`blivet.devices.StorageDevice`
        :param implicit_devices: list of implicitly scheduled partitions whose
                                 sizes may be reduced to make room for the requests
        :type implicit_devices: list of :class:`blivet.devices.StorageDevice`
        :param requests: list of partitioning requests to operate on,
                         or `~.storage.InstallerStorage.autopart_requests` by default
        :type requests: list of :class:`~.storage.partspec.PartSpec` instances
        :returns: the (possibly resized) list of implicit device requests
        :rtype: list of :class:`blivet.devices.StorageDevice`
    """
    if not requests:
        requests = storage.autopart_requests

    # basis for requests with required_space is the sum of the sizes of the
    # two largest free regions
    all_free = (Size(reg.getLength(unit="B")) for reg in get_free_regions(disks))
    all_free = sorted(all_free, reverse=True)
    if not all_free:
        # this should never happen since we've already filtered the disks
        # to those with at least 500MiB free
        log.error("no free space on disks %s", [d.name for d in disks])
        return

    free = all_free[0]
    if len(all_free) > 1:
        free += all_free[1]

    # The boot disk must be set at this point. See if any platform-specific
    # stage1 device we might allocate already exists on the boot disk.
    stage1_device = None
    for device in storage.devices:
        if storage.bootloader.stage1_disk not in device.disks:
            continue

        if storage.bootloader.is_valid_stage1_device(device, early=True):
            stage1_device = device
            break

    #
    # First pass is for partitions only. We'll do LVs later.
    #
    for request in requests:
        if ((request.lv and storage.do_autopart and
             storage.autopart_type in (AUTOPART_TYPE_LVM,
                                       AUTOPART_TYPE_LVM_THINP)) or
                (request.btr and storage.autopart_type == AUTOPART_TYPE_BTRFS)):
            continue

        if request.required_space and request.required_space > free:
            continue

        elif request.fstype in ("prepboot", "efi", "macefi", "hfs+") and \
                (storage.bootloader.skip_bootloader or stage1_device):
            # there should never be a need for more than one of these
            # partitions, so skip them.
            log.info("skipping unneeded stage1 %s request", request.fstype)
            log.debug("%s", request)

            if request.fstype in ["efi", "macefi"] and stage1_device:
                # Set the mountpoint for the existing EFI boot partition
                stage1_device.format.mountpoint = "/boot/efi"

            log.debug("%s", stage1_device)
            continue
        elif request.fstype == "biosboot":
            is_gpt = (stage1_device and
                      getattr(stage1_device.format, "label_type", None) == "gpt")
            has_bios_boot = (stage1_device and
                             any([p.format.type == "biosboot"
                                  for p in storage.partitions
                                  if p.disk == stage1_device]))
            if (storage.bootloader.skip_bootloader or
                not (stage1_device and stage1_device.is_disk and
                     is_gpt and not has_bios_boot)):
                # there should never be a need for more than one of these
                # partitions, so skip them.
                log.info("skipping unneeded stage1 %s request", request.fstype)
                log.debug("%s", request)
                log.debug("%s", stage1_device)
                continue

        if request.size > all_free[0]:
            # no big enough free space for the requested partition
            raise NotEnoughFreeSpaceError(_("No big enough free space on disks for "
                                            "automatic partitioning"))

        if request.encrypted and storage.encrypted_autopart:
            fmt_type = "luks"
            fmt_args = {"passphrase": luks_data.encryption_passphrase,
                        "cipher": storage.encryption_cipher,
                        "escrow_cert": storage.autopart_escrow_cert,
                        "add_backup_passphrase": storage.autopart_add_backup_passphrase,
                        "min_luks_entropy": luks_data.min_entropy,
                        "luks_version": storage.autopart_luks_version,
                        "pbkdf_args": storage.autopart_pbkdf_args
                        }
        else:
            fmt_type = request.fstype
            fmt_args = {}

        dev = storage.new_partition(fmt_type=fmt_type,
                                    fmt_args=fmt_args,
                                    size=request.size,
                                    grow=request.grow,
                                    maxsize=request.max_size,
                                    mountpoint=request.mountpoint,
                                    parents=disks)

        # schedule the device for creation
        storage.create_device(dev)

        if request.encrypted and storage.encrypted_autopart:
            luks_fmt = get_format(request.fstype,
                                  device=dev.path,
                                  mountpoint=request.mountpoint)
            luks_dev = LUKSDevice("luks-%s" % dev.name,
                                  fmt=luks_fmt,
                                  size=dev.size,
                                  parents=dev)
            storage.create_device(luks_dev)

        if storage.do_autopart and \
           storage.autopart_type in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP,
                                     AUTOPART_TYPE_BTRFS):
            # doing LVM/BTRFS -- make sure the newly created partition fits in some
            # free space together with one of the implicitly requested partitions
            smallest_implicit = sorted(implicit_devices, key=lambda d: d.size)[0]
            if (request.size + smallest_implicit.size) > all_free[0]:
                # not enough space to allocate the smallest implicit partition
                # and the request; make the implicit partitions smaller in an
                # attempt to make space for the request
                for implicit_req in implicit_devices:
                    implicit_req.size = FALLBACK_DEFAULT_PART_SIZE

    return implicit_devices
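
The biosboot branch in the function above packs its decision into one dense boolean. Purely as a reading aid, here is the same condition unpacked into a hypothetical helper; biosboot_needed is not part of the real code, and the Mock-based usage just mirrors the style of the tests earlier in this section.

from unittest.mock import Mock


def biosboot_needed(skip_bootloader, stage1_device, has_bios_boot):
    # A biosboot partition is scheduled only when the bootloader is being
    # installed and stage1 is a GPT-labeled disk with no biosboot partition yet.
    if skip_bootloader or stage1_device is None:
        return False
    is_gpt = getattr(stage1_device.format, "label_type", None) == "gpt"
    return bool(stage1_device.is_disk and is_gpt and not has_bios_boot)


gpt_disk = Mock(is_disk=True)
gpt_disk.format.label_type = "gpt"

assert biosboot_needed(False, gpt_disk, has_bios_boot=False)
assert not biosboot_needed(False, gpt_disk, has_bios_boot=True)   # already present
assert not biosboot_needed(True, gpt_disk, has_bios_boot=False)   # bootloader skipped
assert not biosboot_needed(False, None, has_bios_boot=False)      # no stage1 device
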