Example #1
    def is_device_locked_test(self):
        """Test IsDeviceLocked."""
        dev1 = StorageDevice("dev1",
                             fmt=get_format("ext4"),
                             size=Size("10 GiB"))
        dev2 = LUKSDevice(
            "dev2",
            parents=[dev1],
            fmt=get_format("luks"),
            size=Size("10 GiB"),
        )
        dev3 = LUKSDevice(
            "dev3",
            parents=[dev1],
            fmt=get_format("luks", exists=True),
            size=Size("10 GiB"),
        )

        self._add_device(dev1)
        self._add_device(dev2)
        self._add_device(dev3)

        self.assertEqual(self.interface.IsDeviceLocked("dev1"), False)
        self.assertEqual(self.interface.IsDeviceLocked("dev2"), False)
        self.assertEqual(self.interface.IsDeviceLocked("dev3"), True)
Example #2
def _unlock_devices(intf, storage):
    try_passphrase = None
    for device in storage.devices:
        if device.format.type == "luks":
            skip = False
            unlocked = False
            while not (skip or unlocked):
                if try_passphrase is None:
                    passphrase = intf.passphraseEntryWindow(device.name)
                else:
                    passphrase = try_passphrase

                if passphrase is None:
                    # canceled
                    skip = True
                else:
                    device.format.passphrase = passphrase
                    try:
                        device.setup()
                        device.format.setup()
                        luks_dev = LUKSDevice(device.format.mapName,
                                              parents=[device],
                                              exists=True)
                        storage.devicetree._addDevice(luks_dev)
                        storage.devicetree.populate()
                        unlocked = True
                        # try to use the same passphrase for other devices
                        try_passphrase = passphrase
                    except StorageError as serr:
                        log.error("Failed to unlock %s: %s", device.name, serr)
                        device.teardown(recursive=True)
                        device.format.passphrase = None
                        try_passphrase = None
Example #3
    def test_unlock_device(self, device_setup, device_teardown, format_setup):
        """Test UnlockDevice."""
        self.storage.devicetree.populate = Mock()
        self.storage.devicetree.teardown_all = Mock()

        dev1 = StorageDevice("dev1", fmt=get_format("ext4"), size=Size("10 GiB"))
        self._add_device(dev1)

        dev2 = LUKSDevice("dev2", parents=[dev1], fmt=get_format("luks"), size=Size("10 GiB"))
        self._add_device(dev2)

        self.assertEqual(self.interface.UnlockDevice("dev2", "passphrase"), True)

        device_setup.assert_called_once()
        format_setup.assert_called_once()
        device_teardown.assert_not_called()
        self.storage.devicetree.populate.assert_called_once()
        self.storage.devicetree.teardown_all.assert_called_once()
        self.assertTrue(dev2.format.has_key)

        device_setup.side_effect = StorageError("Fake error")
        self.assertEqual(self.interface.UnlockDevice("dev2", "passphrase"), False)

        device_teardown.assert_called_once()
        self.assertFalse(dev2.format.has_key)
Example #4
    def unlock_device(self, device_name, passphrase):
        """Unlocks LUKS device."""
        device_state = self._find_device_state(device_name)
        if device_state is None:
            # TODO: raise an exception?
            log.error("Can't find device to unlock %s", device_name)
            return False

        device = device_state.device
        device.format.passphrase = passphrase
        try:
            device.setup()
            device.format.setup()
            luks_device = LUKSDevice(device.format.map_name,
                                     parents=[device],
                                     exists=True)
            self._storage.devicetree._add_device(luks_device)

            # Wait for the device.
            # Otherwise, we could get a message about no Linux partitions.
            time.sleep(2)

            try_populate_devicetree(self._storage.devicetree)
        except StorageError as serr:
            log.error("Failed to unlock %s: %s", device.name, serr)
            device.teardown(recursive=True)
            device.format.passphrase = None
            return False
        else:
            device_state.set_unlocked(passphrase)
            return True
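A minimal call sketch for the unlock_device() helper above. The object name, device name and passphrase are hypothetical placeholders; the sketch only illustrates the True/False contract of the method.

    # hypothetical caller; "luks_module", the device name and the passphrase
    # are placeholders, not part of the original example
    if luks_module.unlock_device("sda2", "passphrase"):
        log.info("device unlocked and devicetree repopulated")
    else:
        log.warning("unlock failed for sda2")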
Example #5
    def get_format_data_test(self):
        """Test GetFormatData."""
        fmt1 = get_format("ext4", uuid="1234-56-7890", label="LABEL")
        dev1 = StorageDevice("dev1", fmt=fmt1, size=Size("10 GiB"))

        self._add_device(dev1)

        self.assertEqual(self.interface.GetFormatData("dev1"), {
            'type': get_variant(Str, 'ext4'),
            'attrs': get_variant(Dict[Str, Str], {
                "uuid": "1234-56-7890",
                "label": "LABEL",
            }),
            'description': get_variant(Str, 'ext4'),
        })

        fmt2 = get_format("luks")
        dev2 = LUKSDevice("dev2", parents=[dev1], fmt=fmt2, size=Size("10 GiB"))

        self._add_device(dev2)

        self.assertEqual(self.interface.GetFormatData("dev2"), {
            'type': get_variant(Str, 'luks'),
            'attrs': get_variant(Dict[Str, Str], {}),
            'description': get_variant(Str, 'LUKS'),
        })
Example #6
    def setUp(self):
        dev1 = DiskDevice("name", fmt=getFormat("mdmember"))
        dev2 = DiskDevice("other")
        self.part = PartitionDevice("part", fmt=getFormat("mdmember"), parents=[dev2])
        self.dev = MDRaidArrayDevice("dev", level="raid1", parents=[dev1, self.part], fmt=getFormat("luks"))
        self.luks = LUKSDevice("luks", parents=[self.dev], fmt=getFormat("ext4"))

        self.mdraid_method = availability.BLOCKDEV_MDRAID_PLUGIN._method
        self.dm_method = availability.BLOCKDEV_DM_PLUGIN._method
        self.cache_availability = availability.CACHE_AVAILABILITY
Example #7
    def test_set_device_passphrase(self):
        """Test SetDevicePassphrase."""
        dev1 = StorageDevice("dev1", fmt=get_format("ext4"), size=Size("10 GiB"))
        self._add_device(dev1)

        dev2 = LUKSDevice("dev2", parents=[dev1], fmt=get_format("luks"), size=Size("10 GiB"))
        self._add_device(dev2)

        self.assertEqual(self.interface.FindUnconfiguredLUKS(), ["dev2"])
        self.interface.SetDevicePassphrase("dev2", "123456")
        self.assertEqual(self.interface.FindUnconfiguredLUKS(), [])
Example #8
    def test_dependencies(self):
        dev1 = DiskDevice("name", fmt=get_format("mdmember"))
        dev2 = DiskDevice("other", fmt=get_format("mdmember"))
        dev = MDRaidArrayDevice("dev", level="raid1", parents=[dev1, dev2])
        luks = LUKSDevice("luks", parents=[dev])

        # a parent's dependencies are a subset of its child's.
        for d in dev.external_dependencies:
            self.assertIn(d, luks.external_dependencies)

        # make sure that there's at least something in these dependencies
        self.assertGreater(len(luks.external_dependencies), 0)
Example #9
    def test_find_unconfigured_luks(self):
        """Test FindUnconfiguredLUKS."""
        self.assertEqual(self.interface.FindUnconfiguredLUKS(), [])

        dev1 = StorageDevice("dev1", fmt=get_format("ext4"), size=Size("10 GiB"))
        self._add_device(dev1)

        self.assertEqual(self.interface.FindUnconfiguredLUKS(), [])

        dev2 = LUKSDevice("dev2", parents=[dev1], fmt=get_format("luks"), size=Size("10 GiB"))
        self._add_device(dev2)

        self.assertEqual(self.interface.FindUnconfiguredLUKS(), ["dev2"])
Example #10
    def get_raw_device_test(self):
        """Test GetRawDevice."""
        dev1 = StorageDevice("dev1",
                             fmt=get_format("ext4"),
                             size=Size("10 GiB"))
        dev2 = LUKSDevice("dev2",
                          parents=[dev1],
                          fmt=get_format("luks"),
                          size=Size("10 GiB"))

        self._add_device(dev1)
        self._add_device(dev2)

        self.assertEqual(self.interface.GetRawDevice("dev1"), "dev1")
        self.assertEqual(self.interface.GetRawDevice("dev2"), "dev1")
Example #11
    def _unlock_devices(self):
        """
            Loop through devices and attempt to unlock any which are detected as
            LUKS devices.
        """
        try_passphrase = None
        for device in self.storage.devices:
            if device.format.type != "luks":
                continue

            skip = False
            unlocked = False
            while not (skip or unlocked):
                if try_passphrase is None:
                    p = PasswordDialog(self.app, device.name)
                    self.app.switch_screen_modal(p)
                    # treat an empty or cancelled dialog as "no passphrase"
                    passphrase = p.answer.strip() if p.answer else None
                else:
                    passphrase = try_passphrase

                if passphrase is None:
                    # canceled
                    skip = True
                else:
                    device.format.passphrase = passphrase
                    try:
                        device.setup()
                        device.format.setup()
                        luks_dev = LUKSDevice(device.format.map_name,
                                              parents=[device],
                                              exists=True)
                        self.storage.devicetree._add_device(luks_dev)

                        # Wait for the device.
                        # Otherwise, we could get a message about no Linux partitions.
                        time.sleep(2)

                        try_populate_devicetree(self.storage.devicetree)
                        unlocked = True
                        # try to use the same passphrase for other devices
                        try_passphrase = passphrase
                    except StorageError as serr:
                        log.error("Failed to unlock %s: %s", device.name, serr)
                        device.teardown(recursive=True)
                        device.format.passphrase = None
                        try_passphrase = None
        return True
Example #12
    def test_packages(self):
        dev1 = DiskDevice("name", fmt=get_format("mdmember"))

        dev2 = DiskDevice("other", fmt=get_format("mdmember"))
        dev = MDRaidArrayDevice("dev", level="raid1", parents=[dev1, dev2])
        luks = LUKSDevice("luks", parents=[dev])
        packages = luks.packages

        # no duplicates in list of packages
        self.assertEqual(len(packages), len(set(packages)))

        # several packages that ought to be included are
        for package in dev1.packages + dev2.packages + dev.packages:
            self.assertIn(package, packages)

        for package in dev1.format.packages + dev2.format.packages + dev.format.packages:
            self.assertIn(package, packages)
Example #13
def change_encryption(storage, device, encrypted, luks_version):
    """Change encryption of the given device.

    :param storage: an instance of Blivet
    :param device: a device to change
    :param encrypted: should we encrypt the device?
    :param luks_version: a version of LUKS
    :return: a LUKS device or a device slave
    """
    if not encrypted:
        log.info("removing encryption from %s", device.name)
        storage.destroy_device(device)
        return device.slave
    else:
        log.info("applying encryption to %s", device.name)
        new_fmt = get_format("luks", device=device.path, luks_version=luks_version)
        storage.format_device(device, new_fmt)
        luks_dev = LUKSDevice("luks-" + device.name, parents=[device])
        storage.create_device(luks_dev)
        return luks_dev
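A hedged usage sketch for change_encryption() above; storage is assumed to be a Blivet instance and device an existing, unencrypted leaf device from its devicetree (both names are illustrative):

    # schedule encryption of the device and get the new LUKS layer back
    luks_dev = change_encryption(storage, device, encrypted=True, luks_version="luks2")

    # schedule its removal again; the backing ("slave") device is returned
    plain_dev = change_encryption(storage, luks_dev, encrypted=False, luks_version="luks2")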
Example #14
    def test_get_format_data(self):
        """Test GetFormatData."""
        fmt1 = get_format("ext4",
                          uuid="1234-56-7890",
                          label="LABEL",
                          mountpoint="/home")
        dev1 = StorageDevice("dev1", fmt=fmt1, size=Size("10 GiB"))
        self._add_device(dev1)

        assert self.interface.GetFormatData("dev1") == {
            'type': get_variant(Str, 'ext4'),
            'mountable': get_variant(Bool, True),
            'attrs': get_variant(Dict[Str, Str], {
                "uuid": "1234-56-7890",
                "label": "LABEL",
                "mount-point": "/home"
            }),
            'description': get_variant(Str, 'ext4'),
        }

        fmt2 = get_format("luks")
        dev2 = LUKSDevice("dev2",
                          parents=[dev1],
                          fmt=fmt2,
                          size=Size("10 GiB"))
        self._add_device(dev2)

        assert self.interface.GetFormatData("dev2") == {
            'type': get_variant(Str, 'luks'),
            'mountable': get_variant(Bool, False),
            'attrs': get_variant(Dict[Str, Str], {}),
            'description': get_variant(Str, 'LUKS'),
        }
Example #15
    def setUp(self):
        dev1 = DiskDevice("name",
                          fmt=get_format("mdmember"),
                          size=Size("1 GiB"))
        dev2 = DiskDevice("other")
        self.part = PartitionDevice("part",
                                    fmt=get_format("mdmember"),
                                    parents=[dev2])
        self.dev = MDRaidArrayDevice("dev",
                                     level="raid1",
                                     parents=[dev1, self.part],
                                     fmt=get_format("luks"),
                                     total_devices=2,
                                     member_devices=2)
        self.luks = LUKSDevice("luks",
                               parents=[self.dev],
                               fmt=get_format("ext4"))

        self.mdraid_method = availability.BLOCKDEV_MDRAID_PLUGIN._method
        self.dm_method = availability.BLOCKDEV_DM_PLUGIN._method
        self.hfsplus_method = availability.MKFS_HFSPLUS_APP._method
        self.cache_availability = availability.CACHE_AVAILABILITY

        self.addCleanup(self._clean_up)
Example #16
    def _execute_partition_data(self, storage, data, partition_data):
        """Execute the partition data.

        :param storage: an instance of the Blivet's storage object
        :param data: an instance of kickstart data
        :param partition_data: an instance of PartData
        """
        devicetree = storage.devicetree
        kwargs = {}

        if partition_data.onbiosdisk != "":
            # edd_dict is only modified during storage.reset(), so don't do that
            # while executing storage.
            for (disk, biosdisk) in storage.edd_dict.items():
                if "%x" % biosdisk == partition_data.onbiosdisk:
                    partition_data.disk = disk
                    break

            if not partition_data.disk:
                raise StorageError(
                    _("No disk found for specified BIOS disk \"{}\".").format(
                        partition_data.onbiosdisk
                    )
                )

        size = None

        if partition_data.mountpoint == "swap":
            ty = "swap"
            partition_data.mountpoint = ""
            if partition_data.recommended or partition_data.hibernation:
                disk_space = self._disk_free_space
                size = suggest_swap_size(
                    hibernation=partition_data.hibernation,
                    disk_space=disk_space
                )
                partition_data.grow = False
        # if people want to specify no mountpoint for some reason, let them
        # this is really needed for pSeries boot partitions :(
        elif partition_data.mountpoint == "None":
            partition_data.mountpoint = ""
            if partition_data.fstype:
                ty = partition_data.fstype
            else:
                ty = storage.default_fstype
        elif partition_data.mountpoint == 'appleboot':
            ty = "appleboot"
            partition_data.mountpoint = ""
        elif partition_data.mountpoint == 'prepboot':
            ty = "prepboot"
            partition_data.mountpoint = ""
        elif partition_data.mountpoint == 'biosboot':
            ty = "biosboot"
            partition_data.mountpoint = ""
        elif partition_data.mountpoint.startswith("raid."):
            ty = "mdmember"
            kwargs["name"] = partition_data.mountpoint
            partition_data.mountpoint = ""

            if devicetree.get_device_by_name(kwargs["name"]):
                raise StorageError(
                    _("RAID partition \"{}\" is defined multiple times.").format(kwargs["name"])
                )

            if partition_data.onPart:
                data.onPart[kwargs["name"]] = partition_data.onPart
        elif partition_data.mountpoint.startswith("pv."):
            ty = "lvmpv"
            kwargs["name"] = partition_data.mountpoint
            partition_data.mountpoint = ""

            if devicetree.get_device_by_name(kwargs["name"]):
                raise StorageError(
                    _("PV partition \"{}\" is defined multiple times.").format(kwargs["name"])
                )

            if partition_data.onPart:
                data.onPart[kwargs["name"]] = partition_data.onPart
        elif partition_data.mountpoint.startswith("btrfs."):
            ty = "btrfs"
            kwargs["name"] = partition_data.mountpoint
            partition_data.mountpoint = ""

            if devicetree.get_device_by_name(kwargs["name"]):
                raise StorageError(
                    _("Btrfs partition \"{}\" is defined multiple times.").format(kwargs["name"])
                )

            if partition_data.onPart:
                data.onPart[kwargs["name"]] = partition_data.onPart
        elif partition_data.mountpoint == "/boot/efi":
            if blivet.arch.is_mactel():
                ty = "macefi"
            else:
                ty = "EFI System Partition"
                partition_data.fsopts = "defaults,uid=0,gid=0,umask=077,shortname=winnt"
        else:
            if partition_data.fstype != "":
                ty = partition_data.fstype
            elif partition_data.mountpoint == "/boot":
                ty = storage.default_boot_fstype
            else:
                ty = storage.default_fstype

        if not size and partition_data.size:
            size = self._get_size(partition_data.size, "MiB")

        # If this specified an existing request that we should not format,
        # quit here after setting up enough information to mount it later.
        if not partition_data.format:
            if not partition_data.onPart:
                raise StorageError(_("part --noformat must also use the --onpart option."))

            dev = devicetree.resolve_device(partition_data.onPart)
            if not dev:
                raise StorageError(
                    _("Partition \"{}\" given in part command does "
                      "not exist.").format(partition_data.onPart)
                )

            if partition_data.resize:
                size = dev.raw_device.align_target_size(size)
                if size < dev.current_size:
                    # shrink
                    try:
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, partition_data.size, dev.name)
                else:
                    # grow
                    try:
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, partition_data.size, dev.name)

            dev.format.mountpoint = partition_data.mountpoint
            dev.format.mountopts = partition_data.fsopts
            if ty == "swap":
                storage.add_fstab_swap(dev)
            return

        # Now get a format to hold a lot of these extra values.
        kwargs["fmt"] = get_format(ty,
                                   mountpoint=partition_data.mountpoint,
                                   label=partition_data.label,
                                   fsprofile=partition_data.fsprofile,
                                   mountopts=partition_data.fsopts,
                                   create_options=partition_data.mkfsopts,
                                   size=size)
        if not kwargs["fmt"].type:
            raise StorageError(
                _("The \"{}\" file system type is not supported.").format(ty)
            )

        # If we were given a specific disk to create the partition on, verify
        # that it exists first.  If it doesn't exist, see if it exists with
        # mapper/ on the front.  If that doesn't exist either, it's an error.
        if partition_data.disk:
            disk = devicetree.resolve_device(partition_data.disk)
            # if this is a multipath member promote it to the real mpath
            if disk and disk.format.type == "multipath_member":
                mpath_device = disk.children[0]
                log.info("kickstart: part: promoting %s to %s", disk.name, mpath_device.name)
                disk = mpath_device
            if not disk:
                raise StorageError(
                    _("Disk \"{}\" given in part command does "
                      "not exist.").format(partition_data.disk)
                )
            if not disk.partitionable:
                raise StorageError(
                    _("Cannot install to unpartitionable device "
                      "\"{}\".").format(partition_data.disk)
                )

            if disk and disk.partitioned:
                kwargs["parents"] = [disk]
            elif disk:
                raise StorageError(
                    _("Disk \"{}\" in part command is not "
                      "partitioned.").format(partition_data.disk)
                )

            if not kwargs["parents"]:
                raise StorageError(
                    _("Disk \"{}\" given in part command does "
                      "not exist.").format(partition_data.disk)
                )

        kwargs["grow"] = partition_data.grow
        kwargs["size"] = size

        if partition_data.maxSizeMB:
            maxsize = self._get_size(partition_data.maxSizeMB, "MiB")
        else:
            maxsize = None

        kwargs["maxsize"] = maxsize
        kwargs["primary"] = partition_data.primOnly

        add_fstab_swap = None
        # If we were given a pre-existing partition to create a filesystem on,
        # we need to verify it exists and then schedule a new format action to
        # take place there.  Also, we only support a subset of all the options
        # on pre-existing partitions.
        if partition_data.onPart:
            device = devicetree.resolve_device(partition_data.onPart)
            if not device:
                raise StorageError(
                    _("Partition \"{}\" given in part command does "
                      "not exist.").format(partition_data.onPart)
                )

            storage.devicetree.recursive_remove(device, remove_device=False)
            if partition_data.resize:
                size = device.raw_device.align_target_size(size)
                try:
                    devicetree.actions.add(ActionResizeDevice(device, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, partition_data.size, device.name)

            devicetree.actions.add(ActionCreateFormat(device, kwargs["fmt"]))
            if ty == "swap":
                add_fstab_swap = device
        # tmpfs mounts are not disks and don't occupy a disk partition,
        # so handle them here
        elif partition_data.fstype == "tmpfs":
            request = storage.new_tmp_fs(**kwargs)
            storage.create_device(request)
        else:
            # If a previous device has claimed this mount point, delete the
            # old one.
            try:
                if partition_data.mountpoint:
                    device = storage.mountpoints[partition_data.mountpoint]
                    storage.destroy_device(device)
            except KeyError:
                pass

            request = storage.new_partition(**kwargs)
            storage.create_device(request)

            if ty == "swap":
                add_fstab_swap = request

        if partition_data.encrypted:
            passphrase = self._get_passphrase(partition_data)
            cert = storage.get_escrow_certificate(partition_data.escrowcert)

            # Get the version of LUKS and PBKDF arguments.
            partition_data.luks_version = (partition_data.luks_version
                                           or storage.default_luks_version)
            pbkdf_args = get_pbkdf_args(
                luks_version=partition_data.luks_version,
                pbkdf_type=partition_data.pbkdf,
                max_memory_kb=partition_data.pbkdf_memory,
                iterations=partition_data.pbkdf_iterations,
                time_ms=partition_data.pbkdf_time
            )

            if pbkdf_args and not luks_data.pbkdf_args:
                luks_data.pbkdf_args = pbkdf_args

            if partition_data.onPart:
                luksformat = kwargs["fmt"]
                device.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    device=device.path,
                    cipher=partition_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=partition_data.backuppassphrase,
                    luks_version=partition_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=device
                )
            else:
                luksformat = request.format
                request.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    cipher=partition_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=partition_data.backuppassphrase,
                    luks_version=partition_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice("luks%d" % storage.next_id,
                                     fmt=luksformat,
                                     parents=request)

            if ty == "swap":
                # swap is on the LUKS device not on the LUKS' parent device,
                # override the info here
                add_fstab_swap = luksdev

            storage.create_device(luksdev)

        if add_fstab_swap:
            storage.add_fstab_swap(add_fstab_swap)
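A rough, hypothetical driver for _execute_partition_data() above; partition_requests stands in for whatever list of PartData objects the kickstart parser produced and is not part of the original example:

    # hypothetical caller inside the same partitioning task
    for partition_data in partition_requests:  # assumed list of PartData
        self._execute_partition_data(storage, data, partition_data)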
Example #17
    def _execute_logvol_data(self, storage, data, logvol_data):
        """Execute the logvol data.

        :param storage: an instance of the Blivet's storage object
        :param data: an instance of kickstart data
        :param logvol_data: an instance of LogVolData
        """
        devicetree = storage.devicetree

        # FIXME: we should be running sanityCheck on partitioning that is not ks
        # autopart, but that's likely too invasive for #873135 at this moment
        if logvol_data.mountpoint == "/boot" and blivet.arch.is_s390():
            raise StorageError(
                _("/boot cannot be of type \"lvmlv\" on s390x")
            )

        # we might have truncated or otherwise changed the specified vg name
        vgname = data.onPart.get(logvol_data.vgname, logvol_data.vgname)

        size = None

        if logvol_data.percent:
            size = Size(0)

        if logvol_data.mountpoint == "swap":
            ty = "swap"
            logvol_data.mountpoint = ""
            if logvol_data.recommended or logvol_data.hibernation:
                disk_space = self._disk_free_space
                size = suggest_swap_size(
                    hibernation=logvol_data.hibernation,
                    disk_space=disk_space
                )
                logvol_data.grow = False
        else:
            if logvol_data.fstype != "":
                ty = logvol_data.fstype
            else:
                ty = storage.default_fstype

        if size is None and not logvol_data.preexist:
            if not logvol_data.size:
                raise StorageError(
                    _("Size cannot be decided on from kickstart nor obtained from device.")
                )

            size = self._get_size(logvol_data.size, "MiB")

        if logvol_data.thin_pool:
            logvol_data.mountpoint = ""
            ty = None

        # Sanity check mountpoint
        self._check_mount_point(logvol_data.mountpoint)

        # Check that the VG this LV is a member of has already been specified.
        vg = devicetree.get_device_by_name(vgname)
        if not vg:
            raise StorageError(
                _("No volume group exists with the name \"{}\". Specify volume "
                  "groups before logical volumes.").format(logvol_data.vgname)
            )

        # If cache PVs specified, check that they belong to the same VG this LV is a member of
        if logvol_data.cache_pvs:
            pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
            if not all(pv in vg.pvs for pv in pv_devices):
                raise StorageError(
                    _("Cache PVs must belong to the same VG as the cached LV")
                )

        pool = None
        if logvol_data.thin_volume:
            pool = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.pool_name))
            if not pool:
                raise StorageError(
                    _("No thin pool exists with the name \"{}\". Specify thin pools "
                      "before thin volumes.").format(logvol_data.pool_name)
                )

        # If this specifies an existing request that we should not format,
        # quit here after setting up enough information to mount it later.
        if not logvol_data.format:
            if not logvol_data.name:
                raise StorageError(
                    _("logvol --noformat must also use the --name= option.")
                )

            dev = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if not dev:
                raise StorageError(
                    _("Logical volume \"{}\" given in logvol command does "
                      "not exist.").format(logvol_data.name)
                )

            if logvol_data.resize:
                size = dev.raw_device.align_target_size(size)
                if size < dev.current_size:
                    # shrink
                    try:
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, logvol_data.size, dev.name)
                else:
                    # grow
                    try:
                        devicetree.actions.add(ActionResizeDevice(dev, size))
                        devicetree.actions.add(ActionResizeFormat(dev, size))
                    except ValueError as e:
                        self._handle_invalid_target_size(e, logvol_data.size, dev.name)

            dev.format.mountpoint = logvol_data.mountpoint
            dev.format.mountopts = logvol_data.fsopts
            if ty == "swap":
                storage.add_fstab_swap(dev)
            return

        # Make sure this LV name is not already used in the requested VG.
        if not logvol_data.preexist:
            tmp = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if tmp:
                raise StorageError(
                    _("Logical volume name \"{}\" is already in use in volume group "
                      "\"{}\".").format(logvol_data.name, vg.name)
                )

            if not logvol_data.percent and size and not logvol_data.grow and size < vg.pe_size:
                raise StorageError(
                    _("Logical volume size \"{}\" must be larger than the volume "
                      "group extent size of \"{}\".").format(size, vg.pe_size)
                )

        # Now get a format to hold a lot of these extra values.
        fmt = get_format(
            ty,
            mountpoint=logvol_data.mountpoint,
            label=logvol_data.label,
            fsprofile=logvol_data.fsprofile,
            create_options=logvol_data.mkfsopts,
            mountopts=logvol_data.fsopts
        )
        if not fmt.type and not logvol_data.thin_pool:
            raise StorageError(
                _("The \"{}\" file system type is not supported.").format(ty)
            )

        add_fstab_swap = None
        # If we were given a pre-existing LV to create a filesystem on, we need
        # to verify it and its VG exists and then schedule a new format action
        # to take place there.  Also, we only support a subset of all the
        # options on pre-existing LVs.
        if logvol_data.preexist:
            device = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
            if not device:
                raise StorageError(
                    _("Logical volume \"{}\" given in logvol command does "
                      "not exist.").format(logvol_data.name)
                )

            storage.devicetree.recursive_remove(device, remove_device=False)

            if logvol_data.resize:
                size = device.raw_device.align_target_size(size)
                try:
                    devicetree.actions.add(ActionResizeDevice(device, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, logvol_data.size, device.name)

            devicetree.actions.add(ActionCreateFormat(device, fmt))
            if ty == "swap":
                add_fstab_swap = device
        else:
            # If a previous device has claimed this mount point, delete the
            # old one.
            try:
                if logvol_data.mountpoint:
                    device = storage.mountpoints[logvol_data.mountpoint]
                    storage.destroy_device(device)
            except KeyError:
                pass

            if logvol_data.thin_volume:
                parents = [pool]
            else:
                parents = [vg]

            pool_args = {}
            if logvol_data.thin_pool:
                if logvol_data.profile:
                    matching = (p for p in KNOWN_THPOOL_PROFILES if p.name == logvol_data.profile)
                    profile = next(matching, None)
                    if profile:
                        pool_args["profile"] = profile
                    else:
                        log.warning(
                            "No matching profile for %s found in LVM configuration",
                            logvol_data.profile
                        )
                if logvol_data.metadata_size:
                    pool_args["metadata_size"] = Size("%d MiB" % logvol_data.metadata_size)
                if logvol_data.chunk_size:
                    pool_args["chunk_size"] = Size("%d KiB" % logvol_data.chunk_size)

            if logvol_data.maxSizeMB:
                maxsize = self._get_size(logvol_data.maxSizeMB, "MiB")
            else:
                maxsize = None

            if logvol_data.cache_size and logvol_data.cache_pvs:
                pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
                cache_size = Size("%d MiB" % logvol_data.cache_size)
                cache_mode = logvol_data.cache_mode or None
                cache_request = LVMCacheRequest(cache_size, pv_devices, cache_mode)
            else:
                cache_request = None

            request = storage.new_lv(
                fmt=fmt,
                name=logvol_data.name,
                parents=parents,
                size=size,
                thin_pool=logvol_data.thin_pool,
                thin_volume=logvol_data.thin_volume,
                grow=logvol_data.grow,
                maxsize=maxsize,
                percent=logvol_data.percent,
                cache_request=cache_request,
                **pool_args
            )

            storage.create_device(request)
            if ty == "swap":
                add_fstab_swap = request

        if logvol_data.encrypted:
            passphrase = self._get_passphrase(logvol_data)
            cert = storage.get_escrow_certificate(logvol_data.escrowcert)

            # Get the version of LUKS and PBKDF arguments.
            logvol_data.luks_version = logvol_data.luks_version or storage.default_luks_version

            pbkdf_args = get_pbkdf_args(
                luks_version=logvol_data.luks_version,
                pbkdf_type=logvol_data.pbkdf,
                max_memory_kb=logvol_data.pbkdf_memory,
                iterations=logvol_data.pbkdf_iterations,
                time_ms=logvol_data.pbkdf_time
            )

            if pbkdf_args and not luks_data.pbkdf_args:
                luks_data.pbkdf_args = pbkdf_args

            if logvol_data.preexist:
                luksformat = fmt
                device.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    device=device.path,
                    cipher=logvol_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=logvol_data.backuppassphrase,
                    luks_version=logvol_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=device
                )
            else:
                luksformat = request.format
                request.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    cipher=logvol_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=logvol_data.backuppassphrase,
                    luks_version=logvol_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=request
                )

            if ty == "swap":
                # swap is on the LUKS device not on the LUKS' parent device,
                # override the info here
                add_fstab_swap = luksdev

            storage.create_device(luksdev)

        if add_fstab_swap:
            storage.add_fstab_swap(add_fstab_swap)
Example #18
    def add_device(self, user_input):
        """ Create new device

            :param user_input: selected parameters from AddDialog
            :type user_input: class UserInput
            :returns: new device name
            :rtype: str

        """

        actions = []

        if user_input.device_type == "partition":

            if user_input.encrypt:
                dev = PartitionDevice(
                    name="req%d" % self.storage.nextID,
                    size=user_input.size,
                    parents=[i[0] for i in user_input.parents])
                actions.append(blivet.deviceaction.ActionCreateDevice(dev))

                fmt = blivet.formats.getFormat(
                    fmt_type="luks",
                    passphrase=user_input.passphrase,
                    device=dev.path)
                actions.append(blivet.deviceaction.ActionCreateFormat(
                    dev, fmt))

                luks_dev = LUKSDevice("luks-%s" % dev.name,
                                      fmt=blivet.formats.getFormat(
                                          user_input.filesystem,
                                          device=dev.path,
                                          mountpoint=user_input.mountpoint),
                                      size=dev.size,
                                      parents=[dev])

                actions.append(
                    blivet.deviceaction.ActionCreateDevice(luks_dev))

            else:
                new_part = PartitionDevice(
                    name="req%d" % self.storage.nextID,
                    size=user_input.size,
                    parents=[i[0] for i in user_input.parents],
                    partType=PARTITION_TYPE[user_input.advanced["parttype"]])

                actions.append(
                    blivet.deviceaction.ActionCreateDevice(new_part))

                if user_input.advanced["parttype"] != "extended":
                    new_fmt = blivet.formats.getFormat(
                        fmt_type=user_input.filesystem,
                        label=user_input.label,
                        mountpoint=user_input.mountpoint)

                    actions.append(
                        blivet.deviceaction.ActionCreateFormat(
                            new_part, new_fmt))

        elif user_input.device_type == "lvm" and not user_input.encrypt:

            device_name = self._pick_device_name(user_input.name)

            pvs = []

            # exact total size of newly created pvs (future parents)
            total_size = blivet.size.Size("0 MiB")

            for parent, size in user_input.parents:

                dev = PartitionDevice(name="req%d" % self.storage.nextID,
                                      size=size,
                                      parents=[parent])
                ac_part = blivet.deviceaction.ActionCreateDevice(dev)

                fmt = blivet.formats.getFormat(fmt_type="lvmpv")
                ac_fmt = blivet.deviceaction.ActionCreateFormat(dev, fmt)

                actions.extend([ac_part, ac_fmt])

                total_size += dev.size

                # we need to try to create pvs immediately, if something
                # fails, fail now
                try:
                    for ac in (ac_part, ac_fmt):
                        self.storage.devicetree.registerAction(ac)

                except blivet.errors.PartitioningError as e:
                    return ReturnList(success=False,
                                      actions=None,
                                      message=None,
                                      exception=e,
                                      traceback=sys.exc_info()[2])

                pvs.append(dev)

            new_vg = LVMVolumeGroupDevice(size=total_size,
                                          parents=pvs,
                                          name=device_name,
                                          peSize=user_input.advanced["pesize"])

            actions.append(blivet.deviceaction.ActionCreateDevice(new_vg))

        elif user_input.device_type == "lvm" and user_input.encrypt:

            device_name = self._pick_device_name(user_input.name)

            lukses = []

            # exact total size of newly created pvs (future parents)
            total_size = blivet.size.Size("0 MiB")

            for parent, size in user_input.parents:
                dev = PartitionDevice(name="req%d" % self.storage.nextID,
                                      size=size,
                                      parents=[parent])
                ac_part = blivet.deviceaction.ActionCreateDevice(dev)

                fmt = blivet.formats.getFormat(
                    fmt_type="luks",
                    passphrase=user_input.passphrase,
                    device=dev.path)
                ac_fmt = blivet.deviceaction.ActionCreateFormat(dev, fmt)

                luks_dev = LUKSDevice("luks-%s" % dev.name,
                                      fmt=blivet.formats.getFormat(
                                          "lvmpv", device=dev.path),
                                      size=dev.size,
                                      parents=[dev])
                ac_luks = blivet.deviceaction.ActionCreateDevice(luks_dev)

                actions.extend([ac_part, ac_fmt, ac_luks])

                total_size += luks_dev.size

                # we need to try to create pvs immediately, if something
                # fails, fail now
                try:
                    for ac in (ac_part, ac_fmt, ac_luks):
                        self.storage.devicetree.registerAction(ac)

                except blivet.errors.PartitioningError as e:
                    return ReturnList(success=False,
                                      actions=None,
                                      message=None,
                                      exception=e,
                                      traceback=sys.exc_info()[2])

                lukses.append(luks_dev)

            new_vg = LVMVolumeGroupDevice(size=total_size,
                                          parents=lukses,
                                          name=device_name,
                                          peSize=user_input.advanced["pesize"])

            actions.append(blivet.deviceaction.ActionCreateDevice(new_vg))

        elif user_input.device_type == "lvmlv":

            device_name = self._pick_device_name(user_input.name,
                                                 user_input.parents[0][0])

            new_part = LVMLogicalVolumeDevice(
                name=device_name,
                size=user_input.size,
                parents=[i[0] for i in user_input.parents])

            actions.append(blivet.deviceaction.ActionCreateDevice(new_part))

            new_fmt = blivet.formats.getFormat(
                fmt_type=user_input.filesystem,
                mountpoint=user_input.mountpoint)

            actions.append(
                blivet.deviceaction.ActionCreateFormat(new_part, new_fmt))

        elif user_input.device_type == "lvmvg":

            device_name = self._pick_device_name(user_input.name)

            new_vg = LVMVolumeGroupDevice(
                size=user_input.size,
                name=device_name,
                parents=[i[0] for i in user_input.parents],
                peSize=user_input.advanced["pesize"])

            actions.append(blivet.deviceaction.ActionCreateDevice(new_vg))

        elif user_input.device_type == "lvmpv":

            if user_input.encrypt:

                dev = PartitionDevice(
                    name="req%d" % self.storage.nextID,
                    size=user_input.size,
                    parents=[i[0] for i in user_input.parents])
                actions.append(blivet.deviceaction.ActionCreateDevice(dev))

                fmt = blivet.formats.getFormat(
                    fmt_type="luks",
                    passphrase=user_input.passphrase,
                    device=dev.path)
                actions.append(blivet.deviceaction.ActionCreateFormat(
                    dev, fmt))

                luks_dev = LUKSDevice("luks-%s" % dev.name,
                                      fmt=blivet.formats.getFormat(
                                          "lvmpv", device=dev.path),
                                      size=dev.size,
                                      parents=[dev])
                actions.append(
                    blivet.deviceaction.ActionCreateDevice(luks_dev))

            else:
                dev = PartitionDevice(
                    name="req%d" % self.storage.nextID,
                    size=user_input.size,
                    parents=[i[0] for i in user_input.parents])
                actions.append(blivet.deviceaction.ActionCreateDevice(dev))

                fmt = blivet.formats.getFormat(fmt_type="lvmpv")
                actions.append(blivet.deviceaction.ActionCreateFormat(
                    dev, fmt))

        elif user_input.device_type == "btrfs volume":

            device_name = self._pick_device_name(user_input.name)

            # for btrfs we need to create parents first -- currently selected "parents" are
            # disks but "real parents" for subvolume are btrfs formatted partitions
            btrfs_parents = []

            # exact total size of newly created partitions (future parents)
            total_size = blivet.size.Size("0 MiB")

            for parent, size in user_input.parents:

                if user_input.btrfs_type == "disks":
                    assert parent.isDisk

                    fmt = blivet.formats.getFormat(fmt_type="btrfs")
                    ac_fmt = blivet.deviceaction.ActionCreateFormat(
                        parent, fmt)

                    actions.append(ac_fmt)

                    try:
                        self.storage.devicetree.registerAction(ac_fmt)

                    except Exception as e:  # pylint: disable=broad-except
                        return ReturnList(success=False,
                                          actions=None,
                                          message=None,
                                          exception=e,
                                          traceback=sys.exc_info()[2])

                    total_size += size
                    btrfs_parents.append(parent)

                else:

                    dev = PartitionDevice(name="req%d" % self.storage.nextID,
                                          size=size,
                                          parents=[parent])
                    ac_part = blivet.deviceaction.ActionCreateDevice(dev)

                    fmt = blivet.formats.getFormat(fmt_type="btrfs")
                    ac_fmt = blivet.deviceaction.ActionCreateFormat(dev, fmt)

                    actions.extend([ac_part, ac_fmt])

                    total_size += dev.size

                    # we need to try to create partitions immediately, if something
                    # fails, fail now
                    try:
                        for ac in (ac_part, ac_fmt):
                            self.storage.devicetree.registerAction(ac)

                    except blivet.errors.PartitioningError as e:
                        return ReturnList(success=False,
                                          actions=None,
                                          message=None,
                                          exception=e,
                                          traceback=sys.exc_info()[2])

                    btrfs_parents.append(dev)

            new_btrfs = BTRFSVolumeDevice(device_name,
                                          size=total_size,
                                          parents=btrfs_parents)
            new_btrfs.format = blivet.formats.getFormat(
                "btrfs", label=device_name, mountpoint=user_input.mountpoint)
            actions.append(blivet.deviceaction.ActionCreateDevice(new_btrfs))

        elif user_input.device_type == "btrfs subvolume":

            device_name = self._pick_device_name(user_input.name,
                                                 user_input.parents[0][0])

            new_btrfs = BTRFSSubVolumeDevice(
                device_name, parents=[i[0] for i in user_input.parents])
            new_btrfs.format = blivet.formats.getFormat(
                "btrfs", mountpoint=user_input.mountpoint)
            actions.append(blivet.deviceaction.ActionCreateDevice(new_btrfs))

        elif user_input.device_type == "mdraid":
            device_name = self._pick_device_name(user_input.name)

            parts = []

            # exact total size of newly created pvs (future parents)
            total_size = blivet.size.Size("0 MiB")

            for parent, size in user_input.parents:

                dev = PartitionDevice(name="req%d" % self.storage.nextID,
                                      size=size,
                                      parents=[parent])
                ac_part = blivet.deviceaction.ActionCreateDevice(dev)

                fmt = blivet.formats.getFormat(fmt_type="mdmember")
                ac_fmt = blivet.deviceaction.ActionCreateFormat(dev, fmt)

                actions.extend([ac_part, ac_fmt])

                total_size += dev.size

                # we need to try to create pvs immediately, if something
                # fails, fail now
                try:
                    for ac in (ac_part, ac_fmt):
                        self.storage.devicetree.registerAction(ac)

                except blivet.errors.PartitioningError as e:
                    return ReturnList(success=False,
                                      actions=None,
                                      message=None,
                                      exception=e,
                                      traceback=sys.exc_info()[2])

                parts.append(dev)

            new_md = MDRaidArrayDevice(size=total_size,
                                       parents=parts,
                                       name=device_name,
                                       level=user_input.raid_level,
                                       memberDevices=len(parts),
                                       totalDevices=len(parts))
            actions.append(blivet.deviceaction.ActionCreateDevice(new_md))

            fmt = blivet.formats.getFormat(fmt_type=user_input.filesystem)
            actions.append(blivet.deviceaction.ActionCreateFormat(new_md, fmt))

        try:
            for ac in actions:
                if not ac._applied:
                    self.storage.devicetree.registerAction(ac)

            blivet.partitioning.doPartitioning(self.storage)

        except Exception as e:  # pylint: disable=broad-except
            return ReturnList(success=False,
                              actions=None,
                              message=None,
                              exception=e,
                              traceback=sys.exc_info()[2])

        return ReturnList(success=True,
                          actions=actions,
                          message=None,
                          exception=None,
                          traceback=None)
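A short, hypothetical call sketch for add_device() above; blivet_utils is a placeholder for the object the method lives on, and user_input is whatever the AddDialog produced. Only the ReturnList fields already used by the method are inspected:

    result = blivet_utils.add_device(user_input)
    if result.success:
        print("scheduled actions:", result.actions)
    else:
        print("device creation failed:", result.exception)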
Example #19
    def _execute_raid_data(self, storage, data, raid_data):
        """Execute the raid data.

        :param storage: an instance of the Blivet's storage object
        :param data: an instance of kickstart data
        :param raid_data: an instance of RaidData
        """
        raidmems = []
        devicetree = storage.devicetree
        devicename = raid_data.device
        if raid_data.preexist:
            device = devicetree.resolve_device(devicename)
            if device:
                devicename = device.name

        kwargs = {}

        if raid_data.mountpoint == "swap":
            ty = "swap"
            raid_data.mountpoint = ""
        elif raid_data.mountpoint.startswith("pv."):
            ty = "lvmpv"
            kwargs["name"] = raid_data.mountpoint
            data.onPart[kwargs["name"]] = devicename

            if devicetree.get_device_by_name(kwargs["name"]):
                raise StorageError(
                    _("PV partition \"{}\" is defined multiple "
                      "times.").format(kwargs["name"])
                )

            raid_data.mountpoint = ""
        elif raid_data.mountpoint.startswith("btrfs."):
            ty = "btrfs"
            kwargs["name"] = raid_data.mountpoint
            data.onPart[kwargs["name"]] = devicename

            if devicetree.get_device_by_name(kwargs["name"]):
                raise StorageError(
                    _("Btrfs partition \"{}\" is defined multiple "
                      "times.").format(kwargs["name"])
                )

            raid_data.mountpoint = ""
        else:
            if raid_data.fstype != "":
                ty = raid_data.fstype
            elif (raid_data.mountpoint == "/boot"
                  and "mdarray" in storage.bootloader.stage2_device_types):
                ty = storage.default_boot_fstype
            else:
                ty = storage.default_fstype

        # Sanity check mountpoint
        self._check_mount_point(raid_data.mountpoint)

        # If this specifies an existing request that we should not format,
        # quit here after setting up enough information to mount it later.
        if not raid_data.format:
            if not devicename:
                raise StorageError(
                    _("raid --noformat must also use the --device option.")
                )

            dev = devicetree.get_device_by_name(devicename)
            if not dev:
                raise StorageError(
                    _("RAID device \"{}\" given in raid command does 
                      "not exist.").format(devicename)
                )

            dev.format.mountpoint = raid_data.mountpoint
            dev.format.mountopts = raid_data.fsopts
            if ty == "swap":
                storage.add_fstab_swap(dev)
            return

        # Get a list of all the RAID members.
        for member in raid_data.members:
            dev = devicetree.resolve_device(member)
            if not dev:
                # if member is using --onpart, use original device
                mem = data.onPart.get(member, member)
                dev = devicetree.resolve_device(mem) or lookup_alias(devicetree, member)
            if dev and dev.format.type == "luks":
                try:
                    dev = dev.children[0]
                except IndexError:
                    dev = None

            if dev and dev.format.type != "mdmember":
                raise StorageError(
                    _("RAID device \"{}\" has a format of \"{}\", but should have "
                      "a format of \"mdmember\".").format(member, dev.format.type)
                )

            if not dev:
                raise StorageError(
                    _("Tried to use undefined partition \"{}\" in RAID "
                      "specification.").format(member)
                )

            raidmems.append(dev)

        # Now get a format to hold a lot of these extra values.
        kwargs["fmt"] = get_format(
            ty,
            label=raid_data.label,
            fsprofile=raid_data.fsprofile,
            mountpoint=raid_data.mountpoint,
            mountopts=raid_data.fsopts,
            create_options=raid_data.mkfsopts
        )

        if not kwargs["fmt"].type:
            raise StorageError(
                _("The \"{}\" file system type is not supported.").format(ty)
            )

        kwargs["name"] = devicename
        kwargs["level"] = raid_data.level
        kwargs["parents"] = raidmems
        kwargs["member_devices"] = len(raidmems) - raid_data.spares
        kwargs["total_devices"] = len(raidmems)

        if raid_data.chunk_size:
            kwargs["chunk_size"] = Size("%d KiB" % raid_data.chunk_size)

        add_fstab_swap = None

        # If we were given a pre-existing RAID to create a filesystem on,
        # we need to verify it exists and then schedule a new format action
        # to take place there.  Also, we only support a subset of all the
        # options on pre-existing RAIDs.
        if raid_data.preexist:
            device = devicetree.get_device_by_name(devicename)

            if not device:
                raise StorageError(
                    _("RAID volume \"{}\" specified with --useexisting does "
                      "not exist.").format(devicename)
                )

            storage.devicetree.recursive_remove(device, remove_device=False)
            devicetree.actions.add(ActionCreateFormat(device, kwargs["fmt"]))
            if ty == "swap":
                add_fstab_swap = device
        else:
            if devicename and devicename in (a.name for a in storage.mdarrays):
                raise StorageError(
                    _("The RAID volume name \"{}\" is already in use.").format(devicename)
                )

            # If a previous device has claimed this mount point, delete the
            # old one.
            try:
                if raid_data.mountpoint:
                    device = storage.mountpoints[raid_data.mountpoint]
                    storage.destroy_device(device)
            except KeyError:
                pass

            request = storage.new_mdarray(**kwargs)
            storage.create_device(request)

            if ty == "swap":
                add_fstab_swap = request

        if raid_data.encrypted:
            passphrase = self._get_passphrase(raid_data)
            cert = storage.get_escrow_certificate(raid_data.escrowcert)

            # Get the version of LUKS and PBKDF arguments.
            raid_data.luks_version = raid_data.luks_version or storage.default_luks_version

            pbkdf_args = get_pbkdf_args(
                luks_version=raid_data.luks_version,
                pbkdf_type=raid_data.pbkdf,
                max_memory_kb=raid_data.pbkdf_memory,
                iterations=raid_data.pbkdf_iterations,
                time_ms=raid_data.pbkdf_time
            )

            if pbkdf_args and not luks_data.pbkdf_args:
                luks_data.pbkdf_args = pbkdf_args

            if raid_data.preexist:
                luksformat = kwargs["fmt"]
                device.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    device=device.path,
                    cipher=raid_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=raid_data.backuppassphrase,
                    luks_version=raid_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=device
                )
            else:
                luksformat = request.format
                request.format = get_format(
                    "luks",
                    passphrase=passphrase,
                    cipher=raid_data.cipher,
                    escrow_cert=cert,
                    add_backup_passphrase=raid_data.backuppassphrase,
                    luks_version=raid_data.luks_version,
                    pbkdf_args=pbkdf_args
                )
                luksdev = LUKSDevice(
                    "luks%d" % storage.next_id,
                    fmt=luksformat,
                    parents=request
                )

            if ty == "swap":
                # swap is on the LUKS device instead of the parent device,
                # override the device here
                add_fstab_swap = luksdev

            storage.create_device(luksdev)

        if add_fstab_swap:
            storage.add_fstab_swap(add_fstab_swap)
Example #20
    def _get_device(self, *args, **kwargs):
        exists = kwargs.get("exists", False)
        parent = StorageDevice(*args,
                               size=kwargs["size"] + crypto.LUKS_METADATA_SIZE,
                               exists=exists)
        return LUKSDevice(*args, **kwargs, parents=[parent])
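A brief, hypothetical use of the _get_device() test helper above: it builds a LUKSDevice whose backing StorageDevice is enlarged by crypto.LUKS_METADATA_SIZE, so the payload size requested by the test still fits inside the container:

    # inside the same test class; the name, size and format are illustrative
    dev = self._get_device("luks-test", size=Size("1 GiB"), fmt=get_format("ext4"))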