def test_availability_mdraidplugin(self):
    """Verify action constructors honor external-dependency availability.

    Forces the availability cache off and stubs plugin ``_method`` hooks so
    the test controls what looks available, then checks that the Action*
    constructors succeed when the mdraid plugin is "available" and raise
    ValueError when it is "unavailable".
    """
    # Disable caching so the _method stubs below take effect immediately.
    availability.CACHE_AVAILABILITY = False
    # NOTE(review): the DM plugin is stubbed here although the test targets
    # the MDRAID plugin — presumably needed by the fixture device stack;
    # confirm against the availability module.
    availability.BLOCKDEV_DM_PLUGIN._method = availability.AvailableMethod

    # if the plugin is not in, there's nothing to test
    self.assertIn(availability.BLOCKDEV_MDRAID_PLUGIN, self.luks.external_dependencies)

    # dev is not among its unavailable dependencies
    availability.BLOCKDEV_MDRAID_PLUGIN._method = availability.AvailableMethod
    availability.MKFS_HFSPLUS_APP._method = availability.AvailableMethod  # macefi
    self.assertNotIn(availability.BLOCKDEV_MDRAID_PLUGIN, self.luks.unavailable_dependencies)
    # All four action constructors should succeed while deps are available.
    self.assertIsNotNone(ActionCreateDevice(self.luks))
    self.assertIsNotNone(ActionDestroyDevice(self.luks))
    self.assertIsNotNone(ActionCreateFormat(self.luks, fmt=get_format("macefi")))
    self.assertIsNotNone(ActionDestroyFormat(self.luks))

    # dev is among the unavailable dependencies
    availability.BLOCKDEV_MDRAID_PLUGIN._method = availability.UnavailableMethod
    self.assertIn(availability.BLOCKDEV_MDRAID_PLUGIN, self.luks.unavailable_dependencies)
    # With the dependency unavailable, every constructor must refuse.
    # NOTE(review): the destroy/format cases use self.dev while the create
    # case uses self.luks — looks intentional (both share the dependency),
    # but confirm against the fixture setup.
    with self.assertRaises(ValueError):
        ActionCreateDevice(self.luks)

    with self.assertRaises(ValueError):
        ActionDestroyDevice(self.dev)

    with self.assertRaises(ValueError):
        ActionCreateFormat(self.dev)

    with self.assertRaises(ValueError):
        ActionDestroyFormat(self.dev)
def testActionCreation(self):
    """ Verify correct operation of action class constructors. """
    # instantiation of device resize action for non-existent device should
    # fail
    # XXX resizable depends on existence, so this is covered implicitly
    sdd = self.storage.devicetree.getDeviceByName("sdd")
    p = self.newDevice(device_class=PartitionDevice, name="sdd1",
                       size=Size("32 GiB"), parents=[sdd])
    with self.assertRaises(ValueError):
        ActionResizeDevice(p, p.size + Size("7232 MiB"))

    # instantiation of device resize action for non-resizable device
    # should fail
    vg = self.storage.devicetree.getDeviceByName("VolGroup")
    self.assertNotEqual(vg, None)
    with self.assertRaises(ValueError):
        ActionResizeDevice(vg, vg.size + Size("32 MiB"))

    # instantiation of format resize action for non-resizable format type
    # should fail
    lv_swap = self.storage.devicetree.getDeviceByName("VolGroup-lv_swap")
    self.assertNotEqual(lv_swap, None)
    with self.assertRaises(ValueError):
        ActionResizeFormat(lv_swap, lv_swap.size + Size("32 MiB"))

    # instantiation of format resize action for non-existent format
    # should fail
    lv_root = self.storage.devicetree.getDeviceByName("VolGroup-lv_root")
    self.assertNotEqual(lv_root, None)
    # temporarily mark the format non-existent to trigger the failure
    lv_root.format.exists = False
    with self.assertRaises(ValueError):
        ActionResizeFormat(lv_root, lv_root.size - Size("1000 MiB"))
    lv_root.format.exists = True

    # instantiation of device create action for existing device should
    # fail
    lv_swap = self.storage.devicetree.getDeviceByName("VolGroup-lv_swap")
    self.assertNotEqual(lv_swap, None)
    self.assertEqual(lv_swap.exists, True)
    with self.assertRaises(ValueError):
        ActionCreateDevice(lv_swap)

    # instantiation of format destroy action for device causes device's
    # format attribute to be a DeviceFormat instance
    lv_swap = self.storage.devicetree.getDeviceByName("VolGroup-lv_swap")
    self.assertNotEqual(lv_swap, None)
    orig_format = lv_swap.format
    self.assertEqual(lv_swap.format.type, "swap")
    destroy_swap = ActionDestroyFormat(lv_swap)
    # constructing the action does not touch the format; apply() does
    self.assertEqual(lv_swap.format.type, "swap")
    destroy_swap.apply()
    self.assertEqual(lv_swap.format.type, None)

    # instantiation of format create action for device causes new format
    # to be accessible via device's format attribute
    new_format = getFormat("vfat", device=lv_swap.path)
    create_swap = ActionCreateFormat(lv_swap, new_format)
    self.assertEqual(lv_swap.format.type, None)
    create_swap.apply()
    self.assertEqual(lv_swap.format, new_format)
    # restore fixture state for subsequent tests
    lv_swap.format = orig_format
def testActionDependencies(self):
    """ Verify correct functioning of action dependencies. """
    # ActionResizeDevice
    # an action that shrinks a device should require the action that
    # shrinks the device's format
    lv_root = self.storage.devicetree.getDeviceByName("VolGroup-lv_root")
    self.assertNotEqual(lv_root, None)
    # force the format to report a small minimum size so the shrink is legal
    lv_root.format._minInstanceSize = Size("10 MiB")
    lv_root.format._targetSize = lv_root.format._minInstanceSize
    # lv_root.format._resizable = True
    shrink_format = ActionResizeFormat(lv_root, lv_root.size - Size("5 GiB"))
    shrink_format.apply()
    shrink_device = ActionResizeDevice(lv_root, lv_root.size - Size("5 GiB"))
    shrink_device.apply()
    self.assertEqual(shrink_device.requires(shrink_format), True)
    self.assertEqual(shrink_format.requires(shrink_device), False)
    # undo so the next scenario starts from the original sizes
    shrink_format.cancel()
    shrink_device.cancel()

    # ActionResizeDevice
    # an action that grows a format should require the action that
    # grows the device
    orig_size = lv_root.currentSize
    grow_device = ActionResizeDevice(lv_root, orig_size + Size("100 MiB"))
    grow_device.apply()
    grow_format = ActionResizeFormat(lv_root, orig_size + Size("100 MiB"))
    grow_format.apply()
    self.assertEqual(grow_format.requires(grow_device), True)
    self.assertEqual(grow_device.requires(grow_format), False)

    # create something like uncommitted autopart
    self.destroyAllDevices()
    sda = self.storage.devicetree.getDeviceByName("sda")
    sdb = self.storage.devicetree.getDeviceByName("sdb")
    sda1 = self.newDevice(device_class=PartitionDevice, name="sda1",
                          size=Size("500 MiB"), parents=[sda])
    sda1_format = self.newFormat("ext4", mountpoint="/boot", device=sda1.path)
    self.scheduleCreateDevice(sda1)
    self.scheduleCreateFormat(device=sda1, fmt=sda1_format)

    sda2 = self.newDevice(device_class=PartitionDevice, name="sda2",
                          size=Size("99.5 GiB"), parents=[sda])
    sda2_format = self.newFormat("lvmpv", device=sda2.path)
    self.scheduleCreateDevice(sda2)
    self.scheduleCreateFormat(device=sda2, fmt=sda2_format)

    sdb1 = self.newDevice(device_class=PartitionDevice, name="sdb1",
                          size=Size("100 GiB"), parents=[sdb])
    sdb1_format = self.newFormat("lvmpv", device=sdb1.path)
    self.scheduleCreateDevice(sdb1)
    self.scheduleCreateFormat(device=sdb1, fmt=sdb1_format)

    vg = self.newDevice(device_class=LVMVolumeGroupDevice,
                        name="VolGroup", parents=[sda2, sdb1])
    self.scheduleCreateDevice(vg)
    lv_root = self.newDevice(device_class=LVMLogicalVolumeDevice,
                             name="lv_root", parents=[vg], size=Size("160 GiB"))
    self.scheduleCreateDevice(lv_root)
    fmt = self.newFormat("ext4", device=lv_root.path, mountpoint="/")
    self.scheduleCreateFormat(device=lv_root, fmt=fmt)

    lv_swap = self.newDevice(device_class=LVMLogicalVolumeDevice,
                             name="lv_swap", parents=[vg], size=Size("4 GiB"))
    self.scheduleCreateDevice(lv_swap)
    fmt = self.newFormat("swap", device=lv_swap.path)
    self.scheduleCreateFormat(device=lv_swap, fmt=fmt)

    # ActionCreateDevice
    # creation of an LV should require the actions that create the VG,
    # its PVs, and the devices that contain the PVs
    lv_root = self.storage.devicetree.getDeviceByName("VolGroup-lv_root")
    self.assertNotEqual(lv_root, None)
    actions = self.storage.devicetree.actions.find(action_type="create",
                                                   object_type="device",
                                                   device=lv_root)
    self.assertEqual(len(actions), 1,
                     "wrong number of device create actions for lv_root: "
                     "%d" % len(actions))
    create_lv_action = actions[0]

    vgs = [d for d in self.storage.vgs if d.name == "VolGroup"]
    self.assertNotEqual(vgs, [])
    vg = vgs[0]
    actions = self.storage.devicetree.actions.find(action_type="create",
                                                   object_type="device",
                                                   device=vg)
    self.assertEqual(len(actions), 1,
                     "wrong number of device create actions for VolGroup")
    create_vg_action = actions[0]

    self.assertEqual(create_lv_action.requires(create_vg_action), True)

    create_pv_actions = []
    pvs = [d for d in self.storage.pvs if d in vg.pvs]
    self.assertNotEqual(pvs, [])
    for pv in pvs:
        # include device and format create actions for each pv
        actions = self.storage.devicetree.actions.find(action_type="create",
                                                       device=pv)
        self.assertEqual(len(actions), 2,
                         "wrong number of device create actions for "
                         "pv %s" % pv.name)
        create_pv_actions.append(actions[0])

    for pv_action in create_pv_actions:
        self.assertEqual(create_lv_action.requires(pv_action), True)
        # also check that the vg create action requires the pv actions
        self.assertEqual(create_vg_action.requires(pv_action), True)

    # ActionCreateDevice
    # the higher numbered partition of two that are scheduled to be
    # created on a single disk should require the action that creates the
    # lower numbered of the two, eg: create sda2 before creating sda3
    sdc = self.storage.devicetree.getDeviceByName("sdc")
    self.assertNotEqual(sdc, None)

    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          parents=[sdc], size=Size("50 GiB"))
    create_sdc1 = self.scheduleCreateDevice(sdc1)
    self.assertEqual(isinstance(create_sdc1, ActionCreateDevice), True)

    sdc2 = self.newDevice(device_class=PartitionDevice, name="sdc2",
                          parents=[sdc], size=Size("50 GiB"))
    create_sdc2 = self.scheduleCreateDevice(sdc2)
    self.assertEqual(isinstance(create_sdc2, ActionCreateDevice), True)

    self.assertEqual(create_sdc2.requires(create_sdc1), True)
    self.assertEqual(create_sdc1.requires(create_sdc2), False)

    # ActionCreateDevice
    # actions that create partitions on two separate disks should not
    # require each other, regardless of the partitions' numbers
    sda1 = self.storage.devicetree.getDeviceByName("sda1")
    self.assertNotEqual(sda1, None)
    actions = self.storage.devicetree.actions.find(action_type="create",
                                                   object_type="device",
                                                   device=sda1)
    self.assertEqual(len(actions), 1,
                     "wrong number of create actions found for sda1")
    create_sda1 = actions[0]
    self.assertEqual(create_sdc2.requires(create_sda1), False)
    self.assertEqual(create_sda1.requires(create_sdc1), False)

    # ActionDestroyDevice
    # an action that destroys a device containing an mdmember format
    # should require the action that destroys the md array it is a
    # member of if an array is defined
    self.destroyAllDevices(disks=["sdc", "sdd"])
    sdc = self.storage.devicetree.getDeviceByName("sdc")
    self.assertNotEqual(sdc, None)
    sdd = self.storage.devicetree.getDeviceByName("sdd")
    self.assertNotEqual(sdd, None)
    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          parents=[sdc], size=Size("40 GiB"))
    self.scheduleCreateDevice(sdc1)
    fmt = self.newFormat("mdmember", device=sdc1.path)
    self.scheduleCreateFormat(device=sdc1, fmt=fmt)

    sdd1 = self.newDevice(device_class=PartitionDevice, name="sdd1",
                          parents=[sdd], size=Size("40 GiB"))
    self.scheduleCreateDevice(sdd1)
    fmt = self.newFormat("mdmember", device=sdd1.path)
    self.scheduleCreateFormat(device=sdd1, fmt=fmt)

    md0 = self.newDevice(device_class=MDRaidArrayDevice,
                         name="md0", level="raid0", minor=0,
                         memberDevices=2, totalDevices=2,
                         parents=[sdc1, sdd1])
    self.scheduleCreateDevice(md0)
    fmt = self.newFormat("ext4", device=md0.path, mountpoint="/home")
    self.scheduleCreateFormat(device=md0, fmt=fmt)

    destroy_md0_format = self.scheduleDestroyFormat(md0)
    destroy_md0 = self.scheduleDestroyDevice(md0)
    destroy_members = [self.scheduleDestroyDevice(sdc1)]
    destroy_members.append(self.scheduleDestroyDevice(sdd1))

    for member in destroy_members:
        # device and format destroy actions for md members should require
        # both device and format destroy actions for the md array
        for array in [destroy_md0_format, destroy_md0]:
            self.assertEqual(member.requires(array), True)

    # ActionDestroyDevice
    # when there are two actions that will each destroy a partition on the
    # same disk, the action that will destroy the lower-numbered
    # partition should require the action that will destroy the higher-
    # numbered partition, eg: destroy sda2 before destroying sda1
    self.destroyAllDevices(disks=["sdc", "sdd"])
    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          parents=[sdc], size=Size("50 GiB"))
    self.scheduleCreateDevice(sdc1)

    sdc2 = self.newDevice(device_class=PartitionDevice, name="sdc2",
                          parents=[sdc], size=Size("40 GiB"))
    self.scheduleCreateDevice(sdc2)

    destroy_sdc1 = self.scheduleDestroyDevice(sdc1)
    destroy_sdc2 = self.scheduleDestroyDevice(sdc2)
    self.assertEqual(destroy_sdc1.requires(destroy_sdc2), True)
    self.assertEqual(destroy_sdc2.requires(destroy_sdc1), False)

    self.destroyAllDevices(disks=["sdc", "sdd"])
    sdc = self.storage.devicetree.getDeviceByName("sdc")
    self.assertNotEqual(sdc, None)
    sdd = self.storage.devicetree.getDeviceByName("sdd")
    self.assertNotEqual(sdd, None)

    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          parents=[sdc], size=Size("50 GiB"))
    create_pv = self.scheduleCreateDevice(sdc1)
    fmt = self.newFormat("lvmpv", device=sdc1.path)
    create_pv_format = self.scheduleCreateFormat(device=sdc1, fmt=fmt)

    testvg = self.newDevice(device_class=LVMVolumeGroupDevice,
                            name="testvg", parents=[sdc1])
    create_vg = self.scheduleCreateDevice(testvg)
    testlv = self.newDevice(device_class=LVMLogicalVolumeDevice,
                            name="testlv", parents=[testvg],
                            size=Size("30 GiB"))
    create_lv = self.scheduleCreateDevice(testlv)
    fmt = self.newFormat("ext4", device=testlv.path)
    create_lv_format = self.scheduleCreateFormat(device=testlv, fmt=fmt)

    # ActionCreateFormat
    # creation of a format on a non-existent device should require the
    # action that creates the device
    self.assertEqual(create_lv_format.requires(create_lv), True)

    # ActionCreateFormat
    # an action that creates a format on a device should require an action
    # that creates a device that the format's device depends on
    self.assertEqual(create_lv_format.requires(create_pv), True)
    self.assertEqual(create_lv_format.requires(create_vg), True)

    # ActionCreateFormat
    # an action that creates a format on a device should require an action
    # that creates a format on a device that the format's device depends on
    self.assertEqual(create_lv_format.requires(create_pv_format), True)

    # XXX from here on, the devices are existing but not in the tree, so
    # we instantiate and use actions directly
    self.destroyAllDevices(disks=["sdc", "sdd"])
    sdc1 = self.newDevice(device_class=PartitionDevice, exists=True,
                          name="sdc1", parents=[sdc], size=Size("50 GiB"))
    sdc1.format = self.newFormat("lvmpv", device=sdc1.path, exists=True,
                                 device_instance=sdc1)
    testvg = self.newDevice(device_class=LVMVolumeGroupDevice, exists=True,
                            name="testvg", parents=[sdc1],
                            size=Size("50 GiB"))
    testlv = self.newDevice(device_class=LVMLogicalVolumeDevice,
                            exists=True, size=Size("30 GiB"),
                            name="testlv", parents=[testvg])
    testlv.format = self.newFormat("ext4", device=testlv.path,
                                   exists=True, device_instance=testlv)

    # ActionResizeDevice
    # an action that resizes a device should require an action that grows
    # a device that the first action's device depends on, eg: grow
    # device containing PV before resize of VG or LVs
    sdc1.format._resizable = True  # override lvmpv.resizable
    # resize actions require existing device+format; restore below
    sdc1.exists = True
    sdc1.format.exists = True
    grow_pv = ActionResizeDevice(sdc1, sdc1.size + Size("10 GiB"))
    grow_pv.apply()
    grow_lv = ActionResizeDevice(testlv, testlv.size + Size("5 GiB"))
    grow_lv.apply()
    grow_lv_format = ActionResizeFormat(testlv, testlv.size + Size("5 GiB"))
    grow_lv_format.apply()
    sdc1.exists = False
    sdc1.format.exists = False

    self.assertEqual(grow_lv.requires(grow_pv), True)
    self.assertEqual(grow_pv.requires(grow_lv), False)

    # ActionResizeFormat
    # an action that grows a format should require the action that grows
    # the format's device
    self.assertEqual(grow_lv_format.requires(grow_lv), True)
    self.assertEqual(grow_lv.requires(grow_lv_format), False)

    # ActionResizeFormat
    # an action that resizes a device's format should depend on an action
    # that grows a device the first device depends on
    self.assertEqual(grow_lv_format.requires(grow_pv), True)
    self.assertEqual(grow_pv.requires(grow_lv_format), False)

    # ActionResizeFormat
    # an action that resizes a device's format should depend on an action
    # that grows a format on a device the first device depends on
    # XXX resize of PV format is not allowed, so there's no real-life
    # example of this to test

    grow_lv_format.cancel()
    grow_lv.cancel()
    grow_pv.cancel()

    # ActionResizeDevice
    # an action that resizes a device should require an action that grows
    # a format on a device that the first action's device depends on, eg:
    # grow PV format before resize of VG or LVs
    # XXX resize of PV format is not allowed, so there's no real-life
    # example of this to test

    # ActionResizeDevice
    # an action that resizes a device should require an action that
    # shrinks a device that depends on the first action's device, eg:
    # shrink LV before resizing VG or PV devices
    testlv.format._minInstanceSize = Size("10 MiB")
    testlv.format._targetSize = testlv.format._minInstanceSize
    shrink_lv = ActionResizeDevice(testlv, testlv.size - Size("10 GiB"))
    shrink_lv.apply()
    sdc1.exists = True
    sdc1.format.exists = True
    shrink_pv = ActionResizeDevice(sdc1, sdc1.size - Size("5 GiB"))
    shrink_pv.apply()
    sdc1.exists = False
    sdc1.format.exists = False

    self.assertEqual(shrink_pv.requires(shrink_lv), True)
    self.assertEqual(shrink_lv.requires(shrink_pv), False)

    # ActionResizeDevice
    # an action that resizes a device should require an action that
    # shrinks a format on a device that depends on the first action's
    # device, eg: shrink LV format before resizing VG or PV devices
    shrink_lv_format = ActionResizeFormat(testlv, testlv.size)
    shrink_lv_format.apply()
    self.assertEqual(shrink_pv.requires(shrink_lv_format), True)
    self.assertEqual(shrink_lv_format.requires(shrink_pv), False)

    # ActionResizeFormat
    # an action that resizes a device's format should depend on an action
    # that shrinks a device that depends on the first device
    # XXX can't think of a real-world example of this since PVs and MD
    # member devices are not resizable in anaconda

    # ActionResizeFormat
    # an action that resizes a device's format should depend on an action
    # that shrinks a format on a device that depends on the first device
    # XXX can't think of a real-world example of this since PVs and MD
    # member devices are not resizable in anaconda

    shrink_lv_format.cancel()
    shrink_lv.cancel()
    shrink_pv.cancel()

    # ActionCreateFormat
    # an action that creates a format on a device should require an action
    # that resizes a device that the format's device depends on
    # XXX Really? Is this always so?

    # ActionCreateFormat
    # an action that creates a format on a device should require an action
    # that resizes a format on a device that the format's device depends on
    # XXX Same as above.

    # ActionCreateFormat
    # an action that creates a format on a device should require an action
    # that resizes the device that will contain the format
    grow_lv = ActionResizeDevice(testlv, testlv.size + Size("1 GiB"))
    fmt = self.newFormat("disklabel", device=testlv.path)
    format_lv = ActionCreateFormat(testlv, fmt)
    self.assertEqual(format_lv.requires(grow_lv), True)
    self.assertEqual(grow_lv.requires(format_lv), False)

    # ActionDestroyFormat
    # an action that destroys a format should require an action that
    # destroys a device that depends on the format's device
    destroy_pv_format = ActionDestroyFormat(sdc1)
    destroy_lv_format = ActionDestroyFormat(testlv)
    destroy_lv = ActionDestroyDevice(testlv)
    self.assertEqual(destroy_pv_format.requires(destroy_lv), True)
    self.assertEqual(destroy_lv.requires(destroy_pv_format), False)

    # ActionDestroyFormat
    # an action that destroys a format should require an action that
    # destroys a format on a device that depends on the first format's
    # device
    self.assertEqual(destroy_pv_format.requires(destroy_lv_format), True)
    self.assertEqual(destroy_lv_format.requires(destroy_pv_format), False)

    sdc2 = self.newDevice(device_class=PartitionDevice, name="sdc2",
                          size=Size("5 GiB"), parents=[sdc])
    create_sdc2 = self.scheduleCreateDevice(sdc2)

    # create actions should always require destroy actions -- even for
    # unrelated devices -- since, after pruning, it should always be the
    # case that destroy actions are processed before create actions (no
    # create/destroy loops are allowed)
    self.assertEqual(create_sdc2.requires(destroy_lv), True)

    # similarly, create actions should also require resize actions
    self.assertEqual(create_sdc2.requires(grow_lv), True)
def testActionObsoletes(self):
    """ Verify correct operation of DeviceAction.obsoletes. """
    self.destroyAllDevices(disks=["sdc"])
    sdc = self.storage.devicetree.getDeviceByName("sdc")
    self.assertNotEqual(sdc, None)

    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          parents=[sdc], size=Size("40 GiB"))

    # ActionCreateDevice
    #
    # - obsoletes other ActionCreateDevice instances w/ lower id and same
    #   device
    create_device_1 = ActionCreateDevice(sdc1)
    create_device_1.apply()
    create_device_2 = ActionCreateDevice(sdc1)
    create_device_2.apply()
    self.assertEqual(create_device_2.obsoletes(create_device_1), True)
    self.assertEqual(create_device_1.obsoletes(create_device_2), False)

    # ActionCreateFormat
    #
    # - obsoletes other ActionCreateFormat instances w/ lower id and same
    #   device
    format_1 = self.newFormat("ext3", mountpoint="/home", device=sdc1.path)
    format_2 = self.newFormat("ext3", mountpoint="/opt", device=sdc1.path)
    create_format_1 = ActionCreateFormat(sdc1, format_1)
    create_format_1.apply()
    create_format_2 = ActionCreateFormat(sdc1, format_2)
    create_format_2.apply()
    self.assertEqual(create_format_2.obsoletes(create_format_1), True)
    self.assertEqual(create_format_1.obsoletes(create_format_2), False)

    # ActionResizeFormat
    #
    # - obsoletes other ActionResizeFormat instances w/ lower id and same
    #   device
    # resize actions require an existing, resizable device+format;
    # flags are restored right after
    sdc1.exists = True
    sdc1.format.exists = True
    sdc1.format._resizable = True
    resize_format_1 = ActionResizeFormat(sdc1, sdc1.size - Size("1000 MiB"))
    resize_format_1.apply()
    resize_format_2 = ActionResizeFormat(sdc1, sdc1.size - Size("5000 MiB"))
    resize_format_2.apply()
    self.assertEqual(resize_format_2.obsoletes(resize_format_1), True)
    self.assertEqual(resize_format_1.obsoletes(resize_format_2), False)
    sdc1.exists = False
    sdc1.format.exists = False

    # ActionCreateFormat
    #
    # - obsoletes resize format actions w/ lower id on same device
    new_format = self.newFormat("ext4", mountpoint="/foo", device=sdc1.path)
    create_format_3 = ActionCreateFormat(sdc1, new_format)
    create_format_3.apply()
    self.assertEqual(create_format_3.obsoletes(resize_format_1), True)
    self.assertEqual(create_format_3.obsoletes(resize_format_2), True)

    # ActionResizeDevice
    #
    # - obsoletes other ActionResizeDevice instances w/ lower id and same
    #   device
    sdc1.exists = True
    sdc1.format.exists = True
    sdc1.format._resizable = True
    resize_device_1 = ActionResizeDevice(sdc1, sdc1.size + Size("10 GiB"))
    resize_device_1.apply()
    resize_device_2 = ActionResizeDevice(sdc1, sdc1.size - Size("10 GiB"))
    resize_device_2.apply()
    self.assertEqual(resize_device_2.obsoletes(resize_device_1), True)
    self.assertEqual(resize_device_1.obsoletes(resize_device_2), False)
    sdc1.exists = False
    sdc1.format.exists = False

    # ActionDestroyFormat
    #
    # - obsoletes all format actions w/ higher id on same device (including
    #   self if format does not exist)
    destroy_format_1 = ActionDestroyFormat(sdc1)
    destroy_format_1.apply()
    destroy_format_2 = ActionDestroyFormat(sdc1)
    destroy_format_2.apply()
    self.assertEqual(destroy_format_1.obsoletes(create_format_1), True)
    self.assertEqual(destroy_format_1.obsoletes(resize_format_1), True)
    self.assertEqual(destroy_format_1.obsoletes(destroy_format_1), True)
    self.assertEqual(destroy_format_2.obsoletes(destroy_format_1), False)
    self.assertEqual(destroy_format_1.obsoletes(destroy_format_2), True)

    # ActionDestroyDevice
    #
    # - obsoletes all actions w/ lower id that act on the same non-existent
    #   device (including self)
    # sdc1 does not exist
    destroy_sdc1 = ActionDestroyDevice(sdc1)
    destroy_sdc1.apply()
    self.assertEqual(destroy_sdc1.obsoletes(create_format_2), True)
    self.assertEqual(destroy_sdc1.obsoletes(resize_format_2), True)
    self.assertEqual(destroy_sdc1.obsoletes(create_device_1), True)
    self.assertEqual(destroy_sdc1.obsoletes(resize_device_1), True)
    self.assertEqual(destroy_sdc1.obsoletes(destroy_sdc1), True)

    # ActionDestroyDevice
    #
    # - obsoletes all but ActionDestroyFormat actions w/ lower id on the
    #   same existing device
    # sda1 exists
    sda1 = self.storage.devicetree.getDeviceByName("sda1")
    self.assertNotEqual(sda1, None)
    # sda1.format._resizable = True
    resize_sda1_format = ActionResizeFormat(sda1, sda1.size - Size("50 MiB"))
    resize_sda1_format.apply()
    resize_sda1 = ActionResizeDevice(sda1, sda1.size - Size("50 MiB"))
    resize_sda1.apply()
    destroy_sda1_format = ActionDestroyFormat(sda1)
    destroy_sda1_format.apply()
    destroy_sda1 = ActionDestroyDevice(sda1)
    destroy_sda1.apply()
    self.assertEqual(destroy_sda1.obsoletes(resize_sda1_format), True)
    self.assertEqual(destroy_sda1.obsoletes(resize_sda1), True)
    self.assertEqual(destroy_sda1.obsoletes(destroy_sda1), False)
    self.assertEqual(destroy_sda1.obsoletes(destroy_sda1_format), False)
def testActionRegistration(self):
    """ Verify correct operation of action registration and cancelling. """
    # self.setUp has just been run, so we should have something like
    # a preexisting autopart config in the devicetree.

    # registering a destroy action for a non-leaf device should fail
    vg = self.storage.devicetree.getDeviceByName("VolGroup")
    self.assertNotEqual(vg, None)
    self.assertEqual(vg.isleaf, False)
    a = ActionDestroyDevice(vg)
    with self.assertRaises(ValueError):
        self.storage.devicetree.registerAction(a)

    # registering any action other than create for a device that's not in
    # the devicetree should fail
    sdc = self.storage.devicetree.getDeviceByName("sdc")
    self.assertNotEqual(sdc, None)
    # exists=True but never added to the tree -- that's the point
    sdc1 = self.newDevice(device_class=PartitionDevice, name="sdc1",
                          size=Size("100 GiB"), parents=[sdc], exists=True)

    sdc1_format = self.newFormat("ext2", device=sdc1.path, mountpoint="/")
    create_sdc1_format = ActionCreateFormat(sdc1, sdc1_format)
    create_sdc1_format.apply()
    with self.assertRaises(blivet.errors.DeviceTreeError):
        self.storage.devicetree.registerAction(create_sdc1_format)

    sdc1_format.exists = True
    sdc1_format._resizable = True
    resize_sdc1_format = ActionResizeFormat(sdc1, sdc1.size - Size("10 GiB"))
    resize_sdc1_format.apply()
    with self.assertRaises(blivet.errors.DeviceTreeError):
        self.storage.devicetree.registerAction(resize_sdc1_format)

    resize_sdc1 = ActionResizeDevice(sdc1, sdc1.size - Size("10 GiB"))
    resize_sdc1.apply()
    with self.assertRaises(blivet.errors.DeviceTreeError):
        self.storage.devicetree.registerAction(resize_sdc1)

    resize_sdc1.cancel()
    resize_sdc1_format.cancel()

    destroy_sdc1_format = ActionDestroyFormat(sdc1)
    with self.assertRaises(blivet.errors.DeviceTreeError):
        self.storage.devicetree.registerAction(destroy_sdc1_format)

    destroy_sdc1 = ActionDestroyDevice(sdc1)
    with self.assertRaises(blivet.errors.DeviceTreeError):
        self.storage.devicetree.registerAction(destroy_sdc1)

    # registering a device destroy action should cause the device to be
    # removed from the devicetree
    lv_root = self.storage.devicetree.getDeviceByName("VolGroup-lv_root")
    self.assertNotEqual(lv_root, None)
    a = ActionDestroyDevice(lv_root)
    self.storage.devicetree.registerAction(a)
    lv_root = self.storage.devicetree.getDeviceByName("VolGroup-lv_root")
    self.assertEqual(lv_root, None)
    # cancel to restore the fixture tree for the next check
    self.storage.devicetree.cancelAction(a)

    # registering a device create action should cause the device to be
    # added to the devicetree
    sdd = self.storage.devicetree.getDeviceByName("sdd")
    self.assertNotEqual(sdd, None)
    sdd1 = self.storage.devicetree.getDeviceByName("sdd1")
    self.assertEqual(sdd1, None)
    sdd1 = self.newDevice(device_class=PartitionDevice, name="sdd1",
                          size=Size("100 GiB"), parents=[sdd])
    a = ActionCreateDevice(sdd1)
    self.storage.devicetree.registerAction(a)
    sdd1 = self.storage.devicetree.getDeviceByName("sdd1")
    self.assertNotEqual(sdd1, None)
def _execute_logvol_data(self, storage, data, logvol_data):
    """Execute the logvol data.

    Translates one kickstart ``logvol`` command into blivet devices and
    scheduled actions: resolves the VG (and thin pool, if any), handles
    the --noformat / --resize / --preexist paths, creates a new LV
    otherwise, and wraps the result in a LUKS device when --encrypted.

    :param storage: an instance of the Blivet's storage object
    :param data: an instance of kickstart data
    :param logvol_data: an instance of LogVolData
    :raise StorageError: on any invalid or unsatisfiable request
    """
    devicetree = storage.devicetree

    # FIXME: we should be running sanityCheck on partitioning that is not ks
    # autopart, but that's likely too invasive for #873135 at this moment
    if logvol_data.mountpoint == "/boot" and blivet.arch.is_s390():
        raise StorageError(_("/boot cannot be of type \"lvmlv\" on s390x"))

    # we might have truncated or otherwise changed the specified vg name
    vgname = data.onPart.get(logvol_data.vgname, logvol_data.vgname)

    size = None

    if logvol_data.percent:
        size = Size(0)

    if logvol_data.mountpoint == "swap":
        ty = "swap"
        logvol_data.mountpoint = ""
        if logvol_data.recommended or logvol_data.hibernation:
            disk_space = self._disk_free_space
            size = suggest_swap_size(
                hibernation=logvol_data.hibernation,
                disk_space=disk_space
            )
            # a recommended size overrides any --grow request
            logvol_data.grow = False
    else:
        if logvol_data.fstype != "":
            ty = logvol_data.fstype
        else:
            ty = storage.default_fstype

    if size is None and not logvol_data.preexist:
        if not logvol_data.size:
            raise StorageError(
                _("Size cannot be decided on from kickstart nor obtained from device.")
            )
        size = self._get_size(logvol_data.size, "MiB")

    if logvol_data.thin_pool:
        # thin pools are not mountable and carry no filesystem type
        logvol_data.mountpoint = ""
        ty = None

    # Sanity check mountpoint
    self._check_mount_point(logvol_data.mountpoint)

    # Check that the VG this LV is a member of has already been specified.
    vg = devicetree.get_device_by_name(vgname)
    if not vg:
        raise StorageError(
            _("No volume group exists with the name \"{}\". Specify volume "
              "groups before logical volumes.").format(logvol_data.vgname)
        )

    # If cache PVs specified, check that they belong to the same VG this LV is a member of
    if logvol_data.cache_pvs:
        pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
        if not all(pv in vg.pvs for pv in pv_devices):
            raise StorageError(
                _("Cache PVs must belong to the same VG as the cached LV")
            )

    pool = None
    if logvol_data.thin_volume:
        pool = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.pool_name))
        if not pool:
            raise StorageError(
                _("No thin pool exists with the name \"{}\". Specify thin pools "
                  "before thin volumes.").format(logvol_data.pool_name)
            )

    # If this specifies an existing request that we should not format,
    # quit here after setting up enough information to mount it later.
    if not logvol_data.format:
        if not logvol_data.name:
            raise StorageError(
                _("logvol --noformat must also use the --name= option.")
            )

        dev = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
        if not dev:
            raise StorageError(
                _("Logical volume \"{}\" given in logvol command does "
                  "not exist.").format(logvol_data.name)
            )

        if logvol_data.resize:
            size = dev.raw_device.align_target_size(size)
            # BUGFIX: was dev.currentSize (blivet 2 camelCase); the rest of
            # this function uses the blivet 3 snake_case API, so the old
            # name would raise AttributeError on the resize path.
            if size < dev.current_size:
                # shrink: resize the format before the device
                try:
                    devicetree.actions.add(ActionResizeFormat(dev, size))
                    devicetree.actions.add(ActionResizeDevice(dev, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, logvol_data.size, dev.name)
            else:
                # grow: resize the device before the format
                try:
                    devicetree.actions.add(ActionResizeDevice(dev, size))
                    devicetree.actions.add(ActionResizeFormat(dev, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, logvol_data.size, dev.name)

        dev.format.mountpoint = logvol_data.mountpoint
        dev.format.mountopts = logvol_data.fsopts

        if ty == "swap":
            storage.add_fstab_swap(dev)
        return

    # Make sure this LV name is not already used in the requested VG.
    if not logvol_data.preexist:
        tmp = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
        if tmp:
            raise StorageError(
                _("Logical volume name \"{}\" is already in use in volume group "
                  "\"{}\".").format(logvol_data.name, vg.name)
            )

        if not logvol_data.percent and size and not logvol_data.grow and size < vg.pe_size:
            raise StorageError(
                _("Logical volume size \"{}\" must be larger than the volume "
                  "group extent size of \"{}\".").format(size, vg.pe_size)
            )

    # Now get a format to hold a lot of these extra values.
    fmt = get_format(
        ty,
        mountpoint=logvol_data.mountpoint,
        label=logvol_data.label,
        fsprofile=logvol_data.fsprofile,
        create_options=logvol_data.mkfsopts,
        mountopts=logvol_data.fsopts
    )
    if not fmt.type and not logvol_data.thin_pool:
        raise StorageError(
            _("The \"{}\" file system type is not supported.").format(ty)
        )

    add_fstab_swap = None

    # If we were given a pre-existing LV to create a filesystem on, we need
    # to verify it and its VG exists and then schedule a new format action
    # to take place there. Also, we only support a subset of all the
    # options on pre-existing LVs.
    if logvol_data.preexist:
        device = devicetree.get_device_by_name("%s-%s" % (vg.name, logvol_data.name))
        if not device:
            raise StorageError(
                _("Logical volume \"{}\" given in logvol command does "
                  "not exist.").format(logvol_data.name)
            )

        # wipe everything stacked on the LV but keep the LV itself
        storage.devicetree.recursive_remove(device, remove_device=False)

        if logvol_data.resize:
            size = device.raw_device.align_target_size(size)
            try:
                devicetree.actions.add(ActionResizeDevice(device, size))
            except ValueError as e:
                self._handle_invalid_target_size(e, logvol_data.size, device.name)

        devicetree.actions.add(ActionCreateFormat(device, fmt))

        if ty == "swap":
            add_fstab_swap = device
    else:
        # If a previous device has claimed this mount point, delete the
        # old one.
        try:
            if logvol_data.mountpoint:
                device = storage.mountpoints[logvol_data.mountpoint]
                storage.destroy_device(device)
        except KeyError:
            pass

        if logvol_data.thin_volume:
            parents = [pool]
        else:
            parents = [vg]

        pool_args = {}

        if logvol_data.thin_pool:
            if logvol_data.profile:
                matching = (p for p in KNOWN_THPOOL_PROFILES if p.name == logvol_data.profile)
                profile = next(matching, None)
                if profile:
                    pool_args["profile"] = profile
                else:
                    # unknown profile is not fatal -- just warn and continue
                    log.warning(
                        "No matching profile for %s found in LVM configuration",
                        logvol_data.profile
                    )
            if logvol_data.metadata_size:
                pool_args["metadata_size"] = Size("%d MiB" % logvol_data.metadata_size)
            if logvol_data.chunk_size:
                pool_args["chunk_size"] = Size("%d KiB" % logvol_data.chunk_size)

        if logvol_data.maxSizeMB:
            maxsize = self._get_size(logvol_data.maxSizeMB, "MiB")
        else:
            maxsize = None

        if logvol_data.cache_size and logvol_data.cache_pvs:
            pv_devices = self._get_cache_pv_devices(devicetree, logvol_data)
            cache_size = Size("%d MiB" % logvol_data.cache_size)
            cache_mode = logvol_data.cache_mode or None
            cache_request = LVMCacheRequest(cache_size, pv_devices, cache_mode)
        else:
            cache_request = None

        request = storage.new_lv(
            fmt=fmt,
            name=logvol_data.name,
            parents=parents,
            size=size,
            thin_pool=logvol_data.thin_pool,
            thin_volume=logvol_data.thin_volume,
            grow=logvol_data.grow,
            maxsize=maxsize,
            percent=logvol_data.percent,
            cache_request=cache_request,
            **pool_args
        )

        storage.create_device(request)

        if ty == "swap":
            add_fstab_swap = request

    if logvol_data.encrypted:
        passphrase = self._get_passphrase(logvol_data)
        cert = storage.get_escrow_certificate(logvol_data.escrowcert)

        # Get the version of LUKS and PBKDF arguments.
        logvol_data.luks_version = logvol_data.luks_version or storage.default_luks_version

        pbkdf_args = get_pbkdf_args(
            luks_version=logvol_data.luks_version,
            pbkdf_type=logvol_data.pbkdf,
            max_memory_kb=logvol_data.pbkdf_memory,
            iterations=logvol_data.pbkdf_iterations,
            time_ms=logvol_data.pbkdf_time
        )

        if pbkdf_args and not luks_data.pbkdf_args:
            luks_data.pbkdf_args = pbkdf_args

        if logvol_data.preexist:
            # replace the LV's format with LUKS; the real fs goes on the
            # LUKS device instead
            luksformat = fmt
            device.format = get_format(
                "luks",
                passphrase=passphrase,
                device=device.path,
                cipher=logvol_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=logvol_data.backuppassphrase,
                luks_version=logvol_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice(
                "luks%d" % storage.next_id,
                fmt=luksformat,
                parents=device
            )
        else:
            luksformat = request.format
            request.format = get_format(
                "luks",
                passphrase=passphrase,
                cipher=logvol_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=logvol_data.backuppassphrase,
                luks_version=logvol_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice(
                "luks%d" % storage.next_id,
                fmt=luksformat,
                parents=request
            )

        if ty == "swap":
            # swap is on the LUKS device not on the LUKS' parent device,
            # override the info here
            add_fstab_swap = luksdev

        storage.create_device(luksdev)

    if add_fstab_swap:
        storage.add_fstab_swap(add_fstab_swap)
def _execute_raid_data(self, storage, data, raid_data):
    """Execute the raid data.

    Schedules creation (or reuse) of an MD RAID array described by one
    kickstart ``raid`` command, optionally wrapping it in LUKS.

    :param storage: an instance of the Blivet's storage object
    :param data: an instance of kickstart data
    :param raid_data: an instance of RaidData
    :raise StorageError: on duplicate definitions, missing devices/members,
        unsupported filesystem types, or invalid option combinations
    """
    raidmems = []
    devicetree = storage.devicetree
    devicename = raid_data.device

    # For a pre-existing array, canonicalize the user-supplied name to the
    # name the devicetree actually knows the device by.
    if raid_data.preexist:
        device = devicetree.resolve_device(devicename)
        if device:
            devicename = device.name

    # kwargs accumulates the arguments eventually passed to new_mdarray().
    kwargs = {}

    # Derive the format type from the mount point, handling the special
    # pseudo-mountpoints (swap, pv.*, btrfs.*) used by kickstart.
    if raid_data.mountpoint == "swap":
        ty = "swap"
        raid_data.mountpoint = ""
    elif raid_data.mountpoint.startswith("pv."):
        ty = "lvmpv"
        kwargs["name"] = raid_data.mountpoint
        # Remember the pv.N -> device mapping for later lvm commands.
        data.onPart[kwargs["name"]] = devicename

        if devicetree.get_device_by_name(kwargs["name"]):
            raise StorageError(
                _("PV partition \"{}\" is defined multiple "
                  "times.").format(kwargs["name"])
            )

        raid_data.mountpoint = ""
    elif raid_data.mountpoint.startswith("btrfs."):
        ty = "btrfs"
        kwargs["name"] = raid_data.mountpoint
        data.onPart[kwargs["name"]] = devicename

        if devicetree.get_device_by_name(kwargs["name"]):
            raise StorageError(
                _("Btrfs partition \"{}\" is defined multiple "
                  "times.").format(kwargs["name"])
            )

        raid_data.mountpoint = ""
    else:
        if raid_data.fstype != "":
            ty = raid_data.fstype
        elif (raid_data.mountpoint == "/boot"
              and "mdarray" in storage.bootloader.stage2_device_types):
            # /boot on RAID is only allowed if the bootloader supports it.
            ty = storage.default_boot_fstype
        else:
            ty = storage.default_fstype

    # Sanity check mountpoint
    self._check_mount_point(raid_data.mountpoint)

    # If this specifies an existing request that we should not format,
    # quit here after setting up enough information to mount it later.
    if not raid_data.format:
        if not devicename:
            raise StorageError(
                _("raid --noformat must also use the --device option.")
            )

        dev = devicetree.get_device_by_name(devicename)
        if not dev:
            raise StorageError(
                _("RAID device \"{}\" given in raid command does "
                  "not exist.").format(devicename)
            )

        dev.format.mountpoint = raid_data.mountpoint
        dev.format.mountopts = raid_data.fsopts
        if ty == "swap":
            storage.add_fstab_swap(dev)
        return

    # Get a list of all the RAID members.
    for member in raid_data.members:
        dev = devicetree.resolve_device(member)
        if not dev:
            # if member is using --onpart, use original device
            mem = data.onPart.get(member, member)
            dev = devicetree.resolve_device(mem) or lookup_alias(devicetree, member)

        # A LUKS-formatted member really means "use the decrypted device
        # inside it"; children may be empty if it is not yet unlocked.
        if dev and dev.format.type == "luks":
            try:
                dev = dev.children[0]
            except IndexError:
                dev = None

        if dev and dev.format.type != "mdmember":
            raise StorageError(
                _("RAID device \"{}\" has a format of \"{}\", but should have "
                  "a format of \"mdmember\".").format(member, dev.format.type)
            )

        if not dev:
            raise StorageError(
                _("Tried to use undefined partition \"{}\" in RAID "
                  "specification.").format(member)
            )

        raidmems.append(dev)

    # Now get a format to hold a lot of these extra values.
    kwargs["fmt"] = get_format(
        ty,
        label=raid_data.label,
        fsprofile=raid_data.fsprofile,
        mountpoint=raid_data.mountpoint,
        mountopts=raid_data.fsopts,
        create_options=raid_data.mkfsopts
    )

    if not kwargs["fmt"].type:
        raise StorageError(
            _("The \"{}\" file system type is not supported.").format(ty)
        )

    kwargs["name"] = devicename
    kwargs["level"] = raid_data.level
    kwargs["parents"] = raidmems
    # Spares are counted in total_devices but not in member_devices.
    kwargs["member_devices"] = len(raidmems) - raid_data.spares
    kwargs["total_devices"] = len(raidmems)

    if raid_data.chunk_size:
        kwargs["chunk_size"] = Size("%d KiB" % raid_data.chunk_size)

    # Device whose swap entry should end up in fstab, if any.
    add_fstab_swap = None

    # If we were given a pre-existing RAID to create a filesystem on,
    # we need to verify it exists and then schedule a new format action
    # to take place there. Also, we only support a subset of all the
    # options on pre-existing RAIDs.
    if raid_data.preexist:
        device = devicetree.get_device_by_name(devicename)

        if not device:
            raise StorageError(
                _("RAID volume \"{}\" specified with --useexisting does "
                  "not exist.").format(devicename)
            )

        # Wipe existing formats/children but keep the array itself.
        storage.devicetree.recursive_remove(device, remove_device=False)
        devicetree.actions.add(ActionCreateFormat(device, kwargs["fmt"]))

        if ty == "swap":
            add_fstab_swap = device
    else:
        if devicename and devicename in (a.name for a in storage.mdarrays):
            raise StorageError(
                _("The RAID volume name \"{}\" is already in use.").format(devicename)
            )

        # If a previous device has claimed this mount point, delete the
        # old one.
        try:
            if raid_data.mountpoint:
                device = storage.mountpoints[raid_data.mountpoint]
                storage.destroy_device(device)
        except KeyError:
            pass

        request = storage.new_mdarray(**kwargs)
        storage.create_device(request)

        if ty == "swap":
            add_fstab_swap = request

    if raid_data.encrypted:
        passphrase = self._get_passphrase(raid_data)
        cert = storage.get_escrow_certificate(raid_data.escrowcert)

        # Get the version of LUKS and PBKDF arguments.
        raid_data.luks_version = raid_data.luks_version or storage.default_luks_version

        pbkdf_args = get_pbkdf_args(
            luks_version=raid_data.luks_version,
            pbkdf_type=raid_data.pbkdf,
            max_memory_kb=raid_data.pbkdf_memory,
            iterations=raid_data.pbkdf_iterations,
            time_ms=raid_data.pbkdf_time
        )

        # Only the first computed PBKDF args become the global default.
        if pbkdf_args and not luks_data.pbkdf_args:
            luks_data.pbkdf_args = pbkdf_args

        if raid_data.preexist:
            # Replace the just-scheduled format with a LUKS container and
            # stack a LUKS device on top of the existing array.
            luksformat = kwargs["fmt"]
            device.format = get_format(
                "luks",
                passphrase=passphrase,
                device=device.path,
                cipher=raid_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=raid_data.backuppassphrase,
                luks_version=raid_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice(
                "luks%d" % storage.next_id,
                fmt=luksformat,
                parents=device
            )
        else:
            luksformat = request.format
            request.format = get_format(
                "luks",
                passphrase=passphrase,
                cipher=raid_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=raid_data.backuppassphrase,
                luks_version=raid_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice(
                "luks%d" % storage.next_id,
                fmt=luksformat,
                parents=request
            )

        if ty == "swap":
            # swap is on the LUKS device instead of the parent device,
            # override the device here
            add_fstab_swap = luksdev

        storage.create_device(luksdev)

    if add_fstab_swap:
        storage.add_fstab_swap(add_fstab_swap)
def _execute_partition_data(self, storage, data, partition_data):
    """Execute the partition data.

    Schedules creation (or reuse/resize) of a single partition described
    by one kickstart ``part`` command, optionally wrapping it in LUKS.

    :param storage: an instance of the Blivet's storage object
    :param data: an instance of kickstart data
    :param partition_data: an instance of PartData
    :raise StorageError: on missing disks/partitions, duplicate
        definitions, unsupported filesystem types, or invalid options
    """
    devicetree = storage.devicetree
    # kwargs accumulates the arguments eventually passed to new_partition().
    kwargs = {}

    if partition_data.onbiosdisk != "":
        # edd_dict is only modified during storage.reset(), so don't do that
        # while executing storage.
        for (disk, biosdisk) in storage.edd_dict.items():
            if "%x" % biosdisk == partition_data.onbiosdisk:
                partition_data.disk = disk
                break

        if not partition_data.disk:
            raise StorageError(
                _("No disk found for specified BIOS disk \"{}\".").format(
                    partition_data.onbiosdisk
                )
            )

    size = None

    # Derive the format type from the mount point, handling the special
    # pseudo-mountpoints (swap, raid.*, pv.*, btrfs.*, boot partitions).
    if partition_data.mountpoint == "swap":
        ty = "swap"
        partition_data.mountpoint = ""
        if partition_data.recommended or partition_data.hibernation:
            disk_space = self._disk_free_space
            size = suggest_swap_size(
                hibernation=partition_data.hibernation,
                disk_space=disk_space
            )
            # A recommended swap size is fixed; never grow it.
            partition_data.grow = False
    # if people want to specify no mountpoint for some reason, let them
    # this is really needed for pSeries boot partitions :(
    elif partition_data.mountpoint == "None":
        partition_data.mountpoint = ""
        if partition_data.fstype:
            ty = partition_data.fstype
        else:
            ty = storage.default_fstype
    elif partition_data.mountpoint == 'appleboot':
        ty = "appleboot"
        partition_data.mountpoint = ""
    elif partition_data.mountpoint == 'prepboot':
        ty = "prepboot"
        partition_data.mountpoint = ""
    elif partition_data.mountpoint == 'biosboot':
        ty = "biosboot"
        partition_data.mountpoint = ""
    elif partition_data.mountpoint.startswith("raid."):
        ty = "mdmember"
        kwargs["name"] = partition_data.mountpoint
        partition_data.mountpoint = ""

        if devicetree.get_device_by_name(kwargs["name"]):
            raise StorageError(
                _("RAID partition \"{}\" is defined multiple times.").format(kwargs["name"])
            )

        # Remember the raid.N -> device mapping for the raid command.
        if partition_data.onPart:
            data.onPart[kwargs["name"]] = partition_data.onPart
    elif partition_data.mountpoint.startswith("pv."):
        ty = "lvmpv"
        kwargs["name"] = partition_data.mountpoint
        partition_data.mountpoint = ""

        if devicetree.get_device_by_name(kwargs["name"]):
            raise StorageError(
                _("PV partition \"{}\" is defined multiple times.").format(kwargs["name"])
            )

        if partition_data.onPart:
            data.onPart[kwargs["name"]] = partition_data.onPart
    elif partition_data.mountpoint.startswith("btrfs."):
        ty = "btrfs"
        kwargs["name"] = partition_data.mountpoint
        partition_data.mountpoint = ""

        if devicetree.get_device_by_name(kwargs["name"]):
            raise StorageError(
                _("Btrfs partition \"{}\" is defined multiple times.").format(kwargs["name"])
            )

        if partition_data.onPart:
            data.onPart[kwargs["name"]] = partition_data.onPart
    elif partition_data.mountpoint == "/boot/efi":
        if blivet.arch.is_mactel():
            ty = "macefi"
        else:
            ty = "EFI System Partition"
            partition_data.fsopts = "defaults,uid=0,gid=0,umask=077,shortname=winnt"
    else:
        if partition_data.fstype != "":
            ty = partition_data.fstype
        elif partition_data.mountpoint == "/boot":
            ty = storage.default_boot_fstype
        else:
            ty = storage.default_fstype

    # A recommended swap size (set above) takes precedence over --size.
    if not size and partition_data.size:
        size = self._get_size(partition_data.size, "MiB")

    # If this specified an existing request that we should not format,
    # quit here after setting up enough information to mount it later.
    if not partition_data.format:
        if not partition_data.onPart:
            raise StorageError(_("part --noformat must also use the --onpart option."))

        dev = devicetree.resolve_device(partition_data.onPart)
        if not dev:
            raise StorageError(
                _("Partition \"{}\" given in part command does "
                  "not exist.").format(partition_data.onPart)
            )

        if partition_data.resize:
            size = dev.raw_device.align_target_size(size)
            # Fixed: was dev.currentSize (old blivet camelCase API); the
            # rest of this module uses the snake_case blivet API.
            if size < dev.current_size:
                # shrink: resize the format before the device so the
                # filesystem never extends past the partition
                try:
                    devicetree.actions.add(ActionResizeFormat(dev, size))
                    devicetree.actions.add(ActionResizeDevice(dev, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, partition_data.size, dev.name)
            else:
                # grow: resize the device before the format
                try:
                    devicetree.actions.add(ActionResizeDevice(dev, size))
                    devicetree.actions.add(ActionResizeFormat(dev, size))
                except ValueError as e:
                    self._handle_invalid_target_size(e, partition_data.size, dev.name)

        dev.format.mountpoint = partition_data.mountpoint
        dev.format.mountopts = partition_data.fsopts
        if ty == "swap":
            storage.add_fstab_swap(dev)
        return

    # Now get a format to hold a lot of these extra values.
    kwargs["fmt"] = get_format(
        ty,
        mountpoint=partition_data.mountpoint,
        label=partition_data.label,
        fsprofile=partition_data.fsprofile,
        mountopts=partition_data.fsopts,
        create_options=partition_data.mkfsopts,
        size=size
    )

    if not kwargs["fmt"].type:
        raise StorageError(
            _("The \"{}\" file system type is not supported.").format(ty)
        )

    # If we were given a specific disk to create the partition on, verify
    # that it exists first. If it doesn't exist, see if it exists with
    # mapper/ on the front. If that doesn't exist either, it's an error.
    if partition_data.disk:
        disk = devicetree.resolve_device(partition_data.disk)

        # if this is a multipath member promote it to the real mpath
        if disk and disk.format.type == "multipath_member":
            mpath_device = disk.children[0]
            log.info("kickstart: part: promoting %s to %s", disk.name, mpath_device.name)
            disk = mpath_device

        if not disk:
            raise StorageError(
                _("Disk \"{}\" given in part command does "
                  "not exist.").format(partition_data.disk)
            )

        if not disk.partitionable:
            raise StorageError(
                _("Cannot install to unpartitionable device "
                  "\"{}\".").format(partition_data.disk)
            )

        if disk and disk.partitioned:
            kwargs["parents"] = [disk]
        elif disk:
            raise StorageError(
                _("Disk \"{}\" in part command is not "
                  "partitioned.").format(partition_data.disk)
            )

        if not kwargs["parents"]:
            raise StorageError(
                _("Disk \"{}\" given in part command does "
                  "not exist.").format(partition_data.disk)
            )

    kwargs["grow"] = partition_data.grow
    kwargs["size"] = size

    if partition_data.maxSizeMB:
        maxsize = self._get_size(partition_data.maxSizeMB, "MiB")
    else:
        maxsize = None

    kwargs["maxsize"] = maxsize
    kwargs["primary"] = partition_data.primOnly

    # Device whose swap entry should end up in fstab, if any.
    add_fstab_swap = None

    # If we were given a pre-existing partition to create a filesystem on,
    # we need to verify it exists and then schedule a new format action to
    # take place there. Also, we only support a subset of all the options
    # on pre-existing partitions.
    if partition_data.onPart:
        device = devicetree.resolve_device(partition_data.onPart)
        if not device:
            raise StorageError(
                _("Partition \"{}\" given in part command does "
                  "not exist.").format(partition_data.onPart)
            )

        # Wipe existing formats/children but keep the partition itself.
        storage.devicetree.recursive_remove(device, remove_device=False)

        if partition_data.resize:
            size = device.raw_device.align_target_size(size)
            try:
                devicetree.actions.add(ActionResizeDevice(device, size))
            except ValueError as e:
                self._handle_invalid_target_size(e, partition_data.size, device.name)

        devicetree.actions.add(ActionCreateFormat(device, kwargs["fmt"]))
        if ty == "swap":
            add_fstab_swap = device
    # tmpfs mounts are not disks and don't occupy a disk partition,
    # so handle them here
    elif partition_data.fstype == "tmpfs":
        request = storage.new_tmp_fs(**kwargs)
        storage.create_device(request)
    else:
        # If a previous device has claimed this mount point, delete the
        # old one.
        try:
            if partition_data.mountpoint:
                device = storage.mountpoints[partition_data.mountpoint]
                storage.destroy_device(device)
        except KeyError:
            pass

        request = storage.new_partition(**kwargs)
        storage.create_device(request)
        if ty == "swap":
            add_fstab_swap = request

    if partition_data.encrypted:
        passphrase = self._get_passphrase(partition_data)
        cert = storage.get_escrow_certificate(partition_data.escrowcert)

        # Get the version of LUKS and PBKDF arguments.
        partition_data.luks_version = (partition_data.luks_version
                                       or storage.default_luks_version)

        pbkdf_args = get_pbkdf_args(
            luks_version=partition_data.luks_version,
            pbkdf_type=partition_data.pbkdf,
            max_memory_kb=partition_data.pbkdf_memory,
            iterations=partition_data.pbkdf_iterations,
            time_ms=partition_data.pbkdf_time
        )

        # Only the first computed PBKDF args become the global default.
        if pbkdf_args and not luks_data.pbkdf_args:
            luks_data.pbkdf_args = pbkdf_args

        if partition_data.onPart:
            # Replace the just-scheduled format with a LUKS container and
            # stack a LUKS device on top of the existing partition.
            luksformat = kwargs["fmt"]
            device.format = get_format(
                "luks",
                passphrase=passphrase,
                device=device.path,
                cipher=partition_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=partition_data.backuppassphrase,
                luks_version=partition_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice(
                "luks%d" % storage.next_id,
                fmt=luksformat,
                parents=device
            )
        else:
            luksformat = request.format
            request.format = get_format(
                "luks",
                passphrase=passphrase,
                cipher=partition_data.cipher,
                escrow_cert=cert,
                add_backup_passphrase=partition_data.backuppassphrase,
                luks_version=partition_data.luks_version,
                pbkdf_args=pbkdf_args
            )
            luksdev = LUKSDevice("luks%d" % storage.next_id,
                                 fmt=luksformat,
                                 parents=request)

        if ty == "swap":
            # swap is on the LUKS device not on the LUKS' parent device,
            # override the info here
            add_fstab_swap = luksdev

        storage.create_device(luksdev)

    if add_fstab_swap:
        storage.add_fstab_swap(add_fstab_swap)