def _reset_storage(storage):
    """Reset the given Blivet storage object.

    Mirrors the disk-selection settings from the DBus module into the
    storage object, reloads storage-related modules when not installing
    into an image, and finally rescans the device tree.

    FIXME: Call the DBus task instead of this function.

    :param storage: an instance of the Blivet's storage object
    """
    # Mirror the disk selection configuration into the storage object.
    selection = STORAGE.get_proxy(DISK_SELECTION)
    storage.ignored_disks = selection.IgnoredDisks
    storage.exclusive_disks = selection.ExclusiveDisks
    storage.protected_devices = selection.ProtectedDevices
    storage.disk_images = selection.DiskImages

    # Reload additional modules (skipped for image installations).
    if not conf.target.is_image:
        iscsi.startup()
        STORAGE.get_proxy(FCOE).ReloadModule()

        if arch.is_s390():
            STORAGE.get_proxy(ZFCP).ReloadModule()

    # Rescan the devices.
    storage.reset()
def __init__(self, data, storage, payload):
    """Initialize the Installation Destination spoke.

    :param data: the kickstart data object
    :param storage: the Blivet storage object
    :param payload: the payload object
    """
    super().__init__(data, storage, payload)
    self.title = N_("Installation Destination")
    self._container = None

    # Connect observers of the storage-related DBus modules.
    self._bootloader_observer = STORAGE.get_observer(BOOTLOADER)
    self._bootloader_observer.connect()
    self._disk_init_observer = STORAGE.get_observer(DISK_INITIALIZATION)
    self._disk_init_observer.connect()
    self._disk_select_observer = STORAGE.get_observer(DISK_SELECTION)
    self._disk_select_observer.connect()
    self._auto_part_observer = STORAGE.get_observer(AUTO_PARTITIONING)
    self._auto_part_observer.connect()

    # Disk names currently selected in the DBus module.
    self._selected_disks = self._disk_select_observer.proxy.SelectedDisks

    # This list gets set up once in initialize and should not be modified
    # except perhaps to add advanced devices. It will remain the full list
    # of disks that can be included in the install.
    self._available_disks = []

    if not flags.automatedInstall:
        # default to using autopart for interactive installs
        self._auto_part_observer.proxy.SetEnabled(True)

    # Spoke state flags.
    self._ready = False
    self._select_all = False
    self._auto_part_enabled = None

    # Collected storage-check results.
    self.errors = []
    self.warnings = []
def write_storage_configuration(storage, sysroot=None):
    """Write the storage configuration to sysroot.

    :param storage: the storage object
    :param sysroot: a path to the target OS installation
    """
    if sysroot is None:
        sysroot = util.getSysroot()

    # Make sure /etc exists on the target.
    etc_dir = "%s/etc" % sysroot
    if not os.path.isdir(etc_dir):
        os.mkdir(etc_dir)

    _write_escrow_packets(storage, sysroot)

    storage.make_mtab()
    storage.fsset.write()
    iscsi.write(sysroot, storage)

    # Let the DBus modules write their own configuration.
    STORAGE.get_proxy(FCOE).WriteConfiguration(sysroot)

    if arch.is_s390():
        STORAGE.get_proxy(ZFCP).WriteConfiguration(sysroot)

    _write_dasd_conf(storage, sysroot)
def __init__(self, data, storage, payload):
    """Initialize the mount point assignment spoke.

    :param data: the kickstart data object
    :param storage: the Blivet storage object
    :param payload: the payload object
    """
    super().__init__(data, storage, payload)
    self.title = N_("Assign mount points")
    self._container = None

    # Proxies of the storage DBus modules.
    self._disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    self._manual_part_proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)

    # Collect the mount point information to display.
    self._mount_info = self._gather_mount_info()
def applyDiskSelection(storage, data, use_names):
    """Apply the selection of disks to the DBus modules.

    :param storage: the Blivet storage object
    :param data: the kickstart data object
    :param use_names: a list of names of the selected disks
    """
    selected = use_names[:]

    # Pull in disk ancestors of every selected disk. Note that `selected`
    # grows while we iterate, which is intentional: ancestors of newly
    # added disks are collected transitively as well.
    for disk in (d for d in storage.disks if d.name in selected):
        for ancestor in disk.ancestors:
            if ancestor.name not in selected and ancestor.is_disk:
                selected.append(ancestor.name)

    STORAGE.get_proxy(DISK_SELECTION).SetSelectedDisks(selected)
    STORAGE.get_proxy(DISK_INITIALIZATION).SetDrivesToClear(use_names)
def _get_initialization_config(self):
    """Get the initialization config.

    FIXME: This is a temporary method.

    :return: a DiskInitializationConfig populated from the DBus module
    """
    proxy = STORAGE.get_proxy(DISK_INITIALIZATION)

    # Build the config from the DBus module state.
    config = DiskInitializationConfig()
    config.initialization_mode = proxy.InitializationMode
    config.drives_to_clear = proxy.DrivesToClear
    config.devices_to_clear = proxy.DevicesToClear
    config.initialize_labels = proxy.InitializeLabelsEnabled
    config.format_unrecognized = proxy.FormatUnrecognizedEnabled
    config.clear_non_existent = False

    # Apply the requested default disk label, warning when it is not
    # supported on this platform.
    disk_label = proxy.DefaultDiskLabel

    if disk_label and not DiskLabel.set_default_label_type(disk_label):
        log.warning("%s is not a supported disklabel type on this platform. "
                    "Using default disklabel %s instead.", disk_label,
                    DiskLabel.get_platform_label_types()[0])

    return config
def __init__(self, data, storage, payload):
    """
    :see: pyanaconda.ui.common.Spoke.__init__
    :param data: data object passed to every spoke to load/store data
                 from/to it
    :type data: pykickstart.base.BaseHandler
    :param storage: object storing storage-related information
                    (disks, partitioning, bootloader, etc.)
    :type storage: blivet.Blivet
    :param payload: object storing payload-related information
    :type payload: pyanaconda.payload.Payload
    """
    # Spoke state.
    self._error = None
    self._back_already_clicked = False
    self._storage_playground = None

    # GTK widgets, looked up later.
    self.label_actions = None
    self.button_reset = None
    self.button_undo = None

    # Observe the bootloader DBus module.
    self._bootloader_observer = STORAGE.get_observer(BOOTLOADER)
    self._bootloader_observer.connect()

    # Initialize both parent classes explicitly.
    StorageCheckHandler.__init__(self)
    NormalSpoke.__init__(self, data, storage, payload)
def do_format(self):
    """Format with a remote task."""
    # Ask the DASD module to format the collected disks and wait
    # for the task to finish, reporting its progress.
    names = [dasd.name for dasd in self._dasds]
    task = STORAGE.get_proxy(self._dasd_module.FormatWithTask(names))
    sync_run_task(task, callback=self._report_progress)
def _configure_partitioning(self, storage):
    """Configure the partitioning.

    :param storage: an instance of Blivet
    """
    log.debug("Executing the automatic partitioning.")
    proxy = STORAGE.get_proxy(AUTO_PARTITIONING)

    # Apply the requested filesystem type to both the default
    # and the default boot filesystem.
    fstype = proxy.FilesystemType
    if fstype:
        storage.set_default_fstype(fstype)
        storage.set_default_boot_fstype(fstype)

    # Apply the PBKDF arguments unless already set globally.
    pbkdf_args = self._luks_format_args.get('pbkdf_args', None)
    if pbkdf_args and not luks_data.pbkdf_args:
        luks_data.pbkdf_args = pbkdf_args

    # Apply the minimal entropy requirement.
    min_luks_entropy = self._luks_format_args.get('min_luks_entropy', None)
    if min_luks_entropy is not None:
        luks_data.min_entropy = min_luks_entropy

    # Collect the requests and run the automatic partitioning.
    requests = self._get_autopart_requests(storage)
    self._do_autopart(storage, self._scheme, requests, self._encrypted,
                      self._luks_format_args)
def _set_storage_defaults(self, storage):
    """Apply the default filesystem, LUKS and partitioning settings.

    :param storage: the Blivet storage object
    """
    fstype = None
    boot_fstype = None

    autopart = STORAGE.get_proxy(AUTO_PARTITIONING)

    # Prefer the default fstype from a kickstart file...
    if autopart.Enabled and autopart.FilesystemType:
        fstype = autopart.FilesystemType
        boot_fstype = fstype
    # ...or fall back to the install class.
    elif self.instClass.defaultFS:
        fstype = self.instClass.defaultFS
        boot_fstype = None

    if fstype:
        storage.set_default_fstype(fstype)

    if boot_fstype:
        storage.set_default_boot_fstype(boot_fstype)

    # The default LUKS version comes from the install class.
    luks_version = self.instClass.default_luks_version
    if luks_version:
        storage.set_default_luks_version(luks_version)

    # So does the default partitioning.
    storage.set_default_partitioning(self.instClass.default_partitioning)
def _filter_default_partitions(requests):
    """Filter default partitions based on the kickstart data.

    :param requests: a list of requests
    :return: a customized list of requests
    """
    proxy = STORAGE.get_proxy(AUTO_PARTITIONING)
    skipped_mountpoints = set()
    skipped_fstypes = set()

    # Collect mountpoints and fstypes to drop from the requests.
    if proxy.Enabled:
        # Remove /home if --nohome is selected.
        if proxy.NoHome:
            skipped_mountpoints.add("/home")

        # Remove /boot if --noboot is selected.
        if proxy.NoBoot:
            skipped_mountpoints.add("/boot")

        # Remove swap if --noswap is selected.
        if proxy.NoSwap:
            skipped_fstypes.add("swap")

            # Swap will not be recommended by the storage checker.
            # TODO: Remove this code from this function.
            from pyanaconda.storage.checker import storage_checker
            storage_checker.add_constraint(STORAGE_SWAP_IS_RECOMMENDED, False)

    def _wanted(request):
        # Keep a request unless its mountpoint or fstype was skipped.
        return (request.mountpoint not in skipped_mountpoints
                and request.fstype not in skipped_fstypes)

    return [request for request in requests if _wanted(request)]
def setup(self, storage, ksdata, payload):
    """Set up the kdump bootloader arguments and packages.

    :param storage: the Blivet storage object
    :param ksdata: the kickstart data object
    :param payload: the payload object (unused here)
    """
    # the kdump addon should run only if requested
    if not flags.cmdline.getbool("kdump_addon", default=False):
        return

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    # Clear any existing crashkernel bootloader arguments
    extra_args = bootloader_proxy.ExtraArguments
    new_args = [arg for arg in extra_args
                if not arg.startswith('crashkernel=')]

    # Copy our reserved amount to the bootloader arguments
    if self.enabled:
        # Ensure that the amount is an amount in MB.
        # Guard against an empty value before indexing it.
        if self.reserveMB and self.reserveMB[-1] != 'M':
            self.reserveMB += 'M'
        # BUGFIX: the argument used to be appended with a leading space
        # (' crashkernel=...'), which the startswith('crashkernel=')
        # filter above could never match on a later run, so duplicate
        # entries accumulated. Append it without the space.
        new_args.append('crashkernel=%s' % self.reserveMB)

    bootloader_proxy.SetExtraArguments(new_args)

    # Do the same thing with the storage.bootloader.boot_args set
    if storage.bootloader.boot_args:
        crashargs = [arg for arg in storage.bootloader.boot_args
                     if arg.startswith('crashkernel=')]
        storage.bootloader.boot_args -= set(crashargs)

    if self.enabled:
        storage.bootloader.boot_args.add('crashkernel=%s' % self.reserveMB)
        ksdata.packages.packageList.append("kexec-tools")

    if self.enablefadump and os.path.exists(FADUMP_CAPABLE_FILE):
        storage.bootloader.boot_args.add('fadump=on')
def execute(self, storage, dry_run=False):
    """Execute the bootloader.

    :param storage: the Blivet storage object
    :param dry_run: if True, skip setting the boot drive, the drive order
                    and the stage1/stage2 devices
    """
    log.debug("Execute the bootloader with dry run %s.", dry_run)
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    # Skip bootloader for s390x image installation.
    if blivet.arch.is_s390() \
            and conf.target.is_image \
            and bootloader_proxy.BootloaderMode == BOOTLOADER_ENABLED:
        bootloader_proxy.SetBootloaderMode(BOOTLOADER_SKIPPED)

    # Is the bootloader enabled?
    if bootloader_proxy.BootloaderMode != BOOTLOADER_ENABLED:
        storage.bootloader.skip_bootloader = True
        log.debug("Bootloader is not enabled, skipping.")
        return

    # Update the disk list. Disks are already sorted by Blivet.
    storage.bootloader.set_disk_list([d for d in storage.disks if d.partitioned])

    # Apply the settings.
    self._update_flags(storage, bootloader_proxy)
    self._apply_args(storage, bootloader_proxy)
    self._apply_location(storage, bootloader_proxy)
    self._apply_password(storage, bootloader_proxy)
    self._apply_timeout(storage, bootloader_proxy)
    self._apply_drive_order(storage, bootloader_proxy, dry_run=dry_run)
    self._apply_boot_drive(storage, bootloader_proxy, dry_run=dry_run)

    # Set the stage2 and stage1 devices.
    if not dry_run:
        storage.bootloader.stage2_device = storage.boot_device
        storage.bootloader.set_stage1_device(storage.devices)
def __init__(self, data, storage, disks, show_remove=True, set_boot=True):
    """Initialize the disk overview dialog.

    :param data: the kickstart data object
    :param storage: the Blivet storage object
    :param disks: the disks to show in the store
    :param show_remove: show the remove button?
    :param set_boot: allow selecting a boot device?
    """
    super().__init__(data)
    self._storage = storage
    self.disks = []

    # Look up the GTK widgets from the builder.
    self._view = self.builder.get_object("disk_tree_view")
    self._store = self.builder.get_object("disk_store")
    self._selection = self.builder.get_object("disk_selection")
    self._summary_label = self.builder.get_object("summary_label")
    self._set_button = self.builder.get_object("set_as_boot_button")
    self._remove_button = self.builder.get_object("remove_button")
    self._bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    self._previousID = None

    # Populate the store with the given disks.
    for disk in disks:
        self._store.append([False,
                            "%s (%s)" % (disk.description, disk.serial),
                            str(disk.size),
                            str(self._storage.get_disk_free_space([disk])),
                            disk.name,
                            disk.id])
    self.disks = disks[:]
    self._update_summary()

    if not show_remove:
        self.builder.get_object("remove_button").hide()

    if not set_boot:
        self._set_button.hide()

    if not disks:
        return

    # Don't select a boot device if no boot device is asked for.
    if self._bootloader_proxy.BootloaderMode != BOOTLOADER_ENABLED:
        return

    # Set up the default boot device. Use what's in the ksdata if anything,
    # then fall back to the first device.
    boot_drive = self._bootloader_proxy.Drive
    default_id = None

    if boot_drive:
        for d in self.disks:
            if d.name == boot_drive:
                default_id = d.id

    if not default_id:
        default_id = self.disks[0].id

    # And then select it in the UI.
    for row in self._store:
        if row[ID_COL] == default_id:
            self._previousID = row[ID_COL]
            row[IS_BOOT_COL] = True
            break
def ignore_nvdimm_blockdevs():
    """Add nvdimm devices to be ignored to the ignored disks."""
    if conf.target.is_directory:
        return

    to_ignore = STORAGE.get_proxy(NVDIMM).GetDevicesToIgnore()

    if not to_ignore:
        return

    log.debug("Adding NVDIMM devices %s to ignored disks", ",".join(to_ignore))

    # Extend the current list of ignored disks with the NVDIMM devices.
    selection = STORAGE.get_proxy(DISK_SELECTION)
    ignored = selection.IgnoredDisks
    ignored.extend(to_ignore)
    selection.SetIgnoredDisks(ignored)
def reset_bootloader(storage):
    """Reset the bootloader.

    :param storage: an instance of the Blivet's storage object
    """
    # Unset the bootloader drive in the DBus module.
    STORAGE.get_proxy(BOOTLOADER).SetDrive(BOOTLOADER_DRIVE_UNSET)

    # Reset the Blivet-side bootloader state.
    storage.bootloader.reset()
def configure_storage(storage, data=None, interactive=False):
    """Setup storage state from the kickstart data.

    :param storage: an instance of the Blivet's storage object
    :param data: an instance of kickstart data or None
    :param interactive: use a task for the interactive partitioning
    """
    auto_part_proxy = STORAGE.get_proxy(AUTO_PARTITIONING)

    if interactive:
        # Interactive partitioning was requested by the UI.
        task = InteractivePartitioningTask(storage)
    elif auto_part_proxy.Enabled:
        # Automatic partitioning: fall back to the storage defaults when
        # the kickstart data doesn't specify a LUKS version or passphrase.
        luks_version = auto_part_proxy.LUKSVersion or storage.default_luks_version
        passphrase = auto_part_proxy.Passphrase or storage.encryption_passphrase
        escrow_cert = storage.get_escrow_certificate(auto_part_proxy.Escrowcert)

        pbkdf_args = get_pbkdf_args(
            luks_version=luks_version,
            pbkdf_type=auto_part_proxy.PBKDF or None,
            max_memory_kb=auto_part_proxy.PBKDFMemory,
            iterations=auto_part_proxy.PBKDFIterations,
            time_ms=auto_part_proxy.PBKDFTime
        )

        luks_format_args = {
            "passphrase": passphrase,
            "cipher": auto_part_proxy.Cipher,
            "luks_version": luks_version,
            "pbkdf_args": pbkdf_args,
            "escrow_cert": escrow_cert,
            "add_backup_passphrase": auto_part_proxy.BackupPassphraseEnabled,
            "min_luks_entropy": MIN_CREATE_ENTROPY,
        }

        task = AutomaticPartitioningTask(
            storage,
            auto_part_proxy.Type,
            auto_part_proxy.Encrypted,
            luks_format_args
        )
    elif STORAGE.get_proxy(MANUAL_PARTITIONING).Enabled:
        # Manual partitioning was requested by the kickstart data.
        task = ManualPartitioningTask(storage)
    else:
        # Otherwise, apply the custom partitioning from the kickstart data.
        task = CustomPartitioningTask(storage, data)

    task.run()
def _update_custom_storage(storage, ksdata):
    """Update kickstart data for custom storage.

    :param storage: an instance of the storage
    :param ksdata: an instance of kickstart data
    """
    auto_part_proxy = STORAGE.get_proxy(AUTO_PARTITIONING)
    manual_part_proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)

    # Clear out whatever was there before.
    reset_custom_storage_data(ksdata)

    # Nothing to generate when automatic or manual partitioning
    # was used instead of the custom one.
    if auto_part_proxy.Enabled or manual_part_proxy.Enabled:
        log.debug("Custom partitioning is disabled.")
        return

    # FIXME: This is an ugly temporary workaround for UI.
    PartitioningModule._setup_kickstart_from_storage(ksdata, storage)
def _configure_partitioning(self, storage):
    """Configure the partitioning.

    :param storage: an instance of Blivet
    """
    log.debug("Setting up the mount points.")
    proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)

    # Set up a mount point for every entry requested by the user.
    for mount_data in proxy.MountPoints:
        self._setup_mount_point(storage, mount_data)
def __init__(self):
    """Initialize the DASD formatting handler."""
    # DASDs scheduled for formatting.
    self._dasds = []
    # Formatting policy flags — presumably toggled by callers; TODO confirm.
    self._can_format_unformatted = True
    self._can_format_ldl = True

    # Progress report signal; messages are logged by default.
    self._report = Signal()
    self._report.connect(log.debug)
    self._last_message = ""

    # Proxy of the DASD DBus module.
    self._dasd_module = STORAGE.get_proxy(DASD)
def set_storage_defaults_from_kickstart(storage):
    """Set the storage default values from a kickstart file.

    FIXME: A temporary workaround for UI.
    """
    autopart = STORAGE.get_proxy(AUTO_PARTITIONING)
    fstype = autopart.FilesystemType

    # Use the requested type for both the default and the boot filesystem.
    if autopart.Enabled and fstype:
        storage.set_default_fstype(fstype)
        storage.set_default_boot_fstype(fstype)
def __init__(self, data):
    """Initialize the dialog.

    :param data: the kickstart data object
    """
    super().__init__(data)

    # Look up the GTK widgets from the builder.
    self._view = self.builder.get_object("disk_tree_view")
    self._store = self.builder.get_object("disk_store")
    self._selection = self.builder.get_object("disk_selection")
    self._summary_label = self.builder.get_object("summary_label")
    self._set_button = self.builder.get_object("set_as_boot_button")
    self._remove_button = self.builder.get_object("remove_button")

    # Proxy of the bootloader DBus module.
    self._bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
def ignore_oemdrv_disks():
    """Ignore disks labeled OEMDRV."""
    matched = device_matches("LABEL=OEMDRV", disks_only=True)

    if not matched:
        return

    # Hoisted out of the loop: one proxy lookup, one read and one write
    # of the ignored-disks list instead of one of each per matched disk.
    disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    ignored_disks = disk_select_proxy.IgnoredDisks
    changed = False

    for oemdrv_disk in matched:
        if oemdrv_disk not in ignored_disks:
            log.info("Adding disk %s labeled OEMDRV to ignored disks.", oemdrv_disk)
            ignored_disks.append(oemdrv_disk)
            changed = True

    # Push the updated list back only if it actually changed.
    if changed:
        disk_select_proxy.SetIgnoredDisks(ignored_disks)
def _update_nvdimm_data(storage):
    """Update kickstart data for NVDIMM.

    FIXME: Move the logic to the iSCSI DBus module.

    :param storage: an instance of the storage
    """
    # Collect the device names of all NVDIMM namespace disks.
    namespaces = [
        device.devname for device in storage.disks
        if isinstance(device, NVDIMMNamespaceDevice)
    ]

    STORAGE.get_proxy(NVDIMM).SetNamespacesToUse(namespaces)
def __init__(self, data, storage, payload):
    """Initialize the Partitioning Options spoke.

    :param data: the kickstart data object
    :param storage: the Blivet storage object
    :param payload: the payload object
    """
    super().__init__(data, storage, payload)
    self.title = N_("Partitioning Options")
    self._container = None
    self._part_type_list = sorted(PARTTYPES.keys())

    # remember the original values so that we can detect a change
    self._disk_init_proxy = STORAGE.get_proxy(DISK_INITIALIZATION)
    self._orig_init_mode = self._disk_init_proxy.InitializationMode
    self._manual_part_proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)
    self._orig_mount_assign = self._manual_part_proxy.Enabled

    # Create the auto partitioning proxy
    self._auto_part_proxy = STORAGE.get_proxy(AUTO_PARTITIONING)

    # default to mount point assignment if it is already (partially)
    # configured
    self._do_mount_assign = self._orig_mount_assign

    if not self._do_mount_assign:
        self._init_mode = self._disk_init_proxy.InitializationMode
    else:
        self._init_mode = CLEAR_PARTITIONS_NONE
def __init__(self, data, storage):
    """Initialize the FCoE dialog.

    :param data: the kickstart data object
    :param storage: the Blivet storage object
    """
    super().__init__(data)
    self._storage = storage
    # Set when a device was added and the device tree needs a rescan.
    self._update_devicetree = False

    # Proxy of the FCoE DBus module.
    self._fcoe_proxy = STORAGE.get_proxy(FCOE)

    # Look up the GTK widgets from the builder.
    self._addButton = self.builder.get_object("addButton")
    self._cancelButton = self.builder.get_object("cancelButton")
    self._spinner = self.builder.get_object("addSpinner")
    self._errorBox = self.builder.get_object("errorBox")
    self._errorLabel = self.builder.get_object("errorLabel")
    self._nicCombo = self.builder.get_object("nicCombo")
    self._dcbCheckbox = self.builder.get_object("dcbCheckbox")
    self._autoCheckbox = self.builder.get_object("autoCheckbox")
def _update_clearpart(storage):
    """Update data for clearpart.

    :param storage: an instance of the storage
    """
    proxy = STORAGE.get_proxy(DISK_INITIALIZATION)

    # Keep an explicitly requested initialization mode untouched.
    if proxy.InitializationMode != CLEAR_PARTITIONS_NONE:
        return

    # FIXME: This is an ugly temporary workaround for UI.
    mode, drives, devices = DiskInitializationModule._find_cleared_devices(storage)

    proxy.SetInitializationMode(mode.value)
    proxy.SetDrivesToClear(drives)
    proxy.SetDevicesToClear(devices)
def eval_rules(self, ksdata, storage, report_only=False):
    """:see: RuleHandler.eval_rules

    :param ksdata: the kickstart data object
    :param storage: the Blivet storage object
    :param report_only: unused here; the rule only ever reports
    :return: a list of RuleMessage instances (empty if nothing to report)
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    # BUGFIX: the DBus proxy exposes CamelCase members (cf. BootloaderMode,
    # ExtraArguments, IsEFI() used elsewhere on the same proxy), so
    # 'password_is_set' is not a valid proxy attribute — use IsPasswordSet.
    if self._require_password and not bootloader_proxy.IsPasswordSet:
        # TODO: Anaconda provides a way to set bootloader password:
        # bootloader_proxy.SetPassword(...)
        # We don't support setting the bootloader password yet,
        # but we shouldn't stop the installation, just because of that.
        return [RuleMessage(self.__class__,
                            common.MESSAGE_TYPE_WARNING,
                            "boot loader password not set up")]

    return []
def unmark_protected_device(storage, spec):
    """Unmark a device as protected.

    :param storage: an instance of the storage
    :param spec: a specification of the device
    """
    proxy = STORAGE.get_proxy(DISK_SELECTION)
    protected = proxy.ProtectedDevices

    # Drop the device from the protected list if it is there.
    if spec in protected:
        protected.remove(spec)

    # Re-apply the (possibly unchanged) protection in both Blivet
    # and the DBus module.
    storage.protect_devices(protected)
    proxy.SetProtectedDevices(protected)
def apply_disk_selection(storage, selected_names):
    """Apply the disks selection.

    :param storage: blivet.Blivet instance
    :param selected_names: a list of selected disk names
    """
    # Get the selected disks.
    selected_disks = filter_disks_by_names(storage.disks, selected_names)

    # Collect names of disk ancestors that are not selected themselves.
    ancestor_names = []

    for disk in selected_disks:
        for ancestor in disk.ancestors:
            if ancestor.is_disk and ancestor.name not in selected_names:
                ancestor_names.append(ancestor.name)

    # Select the disks together with their ancestors.
    STORAGE.get_proxy(DISK_SELECTION).SetSelectedDisks(selected_names + ancestor_names)

    # Only the explicitly selected disks are cleared.
    STORAGE.get_proxy(DISK_INITIALIZATION).SetDrivesToClear(selected_names)
def _run(self):
    """Copy bootloader data files from the deployment checkout to the target root.

    See https://bugzilla.gnome.org/show_bug.cgi?id=726757 This happens
    once, at installation time.

    extlinux ships its modules directly in the RPM in /boot. For GRUB2,
    Anaconda installs device.map there. We may need to add other
    bootloaders here though (if they can't easily be fixed to *copy*
    data into /boot at install time, instead of shipping it in the RPM).
    """
    bootloader = STORAGE.get_proxy(BOOTLOADER)
    is_efi = bootloader.IsEFI()

    physboot = self._physroot + '/boot'
    ostree_boot_source = self._sysroot + '/usr/lib/ostree-boot'

    if not os.path.isdir(ostree_boot_source):
        ostree_boot_source = self._sysroot + '/boot'

    for fname in os.listdir(ostree_boot_source):
        srcpath = os.path.join(ostree_boot_source, fname)

        # We're only copying directories
        if not os.path.isdir(srcpath):
            continue

        # Special handling for EFI; first, we only want to copy the data if the system is
        # actually EFI (simulating grub2-efi being installed). Second, as it's a mount point
        # that's expected to already exist (so if we used copytree, we'd traceback). If it
        # doesn't, we're not on a UEFI system, so we don't want to copy the data.
        # (Same predicate as the original `not fname == 'efi' or is_efi and ...`,
        # with the operator precedence spelled out explicitly.)
        if fname != 'efi' or (is_efi and os.path.isdir(os.path.join(physboot, fname))):
            log.info("Copying bootloader data: %s", fname)
            safe_exec_with_redirect('cp', ['-r', '-p', srcpath, physboot])

    # Unfortunate hack, see https://github.com/rhinstaller/anaconda/issues/1188
    efi_grubenv_link = physboot + '/grub2/grubenv'

    if not is_efi and os.path.islink(efi_grubenv_link):
        os.unlink(efi_grubenv_link)
def _check_space_and_run_dialog(self, partitioning, disks):
    """Check free space on the selected disks and ask the user how to proceed.

    :param partitioning: a DBus proxy of a partitioning module
    :param disks: a list of names of the selected disks
    :return: RESPONSE_RECLAIM, RESPONSE_OK or the response of a space dialog
    """
    # User wants to reclaim the space.
    if self._reclaim_checkbox.get_active():
        return RESPONSE_RECLAIM

    # Get the device tree of the partitioning module.
    device_tree = STORAGE.get_proxy(partitioning.GetDeviceTree())

    # Calculate the required and free space.
    disk_free = Size(device_tree.GetDiskFreeSpace(disks))
    fs_free = Size(device_tree.GetDiskReclaimableSpace(disks))
    disks_size = Size(device_tree.GetDiskTotalSpace(disks))
    sw_space = Size(self.payload.space_required)
    auto_swap = suggest_swap_size()

    log.debug("disk free: %s fs free: %s sw needs: %s auto swap: %s",
              disk_free, fs_free, sw_space, auto_swap)

    # We need enough space for the software, the swap and the metadata.
    # It is not an ideal estimate, but it works.
    required_space = sw_space + auto_swap + STORAGE_METADATA_RATIO * disk_free

    # There is enough space to continue.
    if disk_free >= required_space:
        return RESPONSE_OK

    # Ask user what to do: reclaiming can help only if the disks are big
    # enough to hold the payload without the automatic swap.
    if disks_size >= required_space - auto_swap:
        dialog = NeedSpaceDialog(self.data, payload=self.payload)
        dialog.refresh(required_space, sw_space, auto_swap, disk_free, fs_free)
    else:
        dialog = NoSpaceDialog(self.data, payload=self.payload)
        dialog.refresh(required_space, sw_space, auto_swap, disk_free, fs_free)

    return self.run_lightbox_dialog(dialog)
def reset_storage(storage, scan_all=False, teardown=False, retry=True):
    """Reset the storage model.

    :param storage: an instance of the Blivet's storage object
    :param scan_all: should we scan all devices in the system?
    :param teardown: should we teardown devices in the current device tree?
    :param retry: should we allow to retry the reset?
    """
    # Deactivate all devices; a failure here is logged but not fatal.
    if teardown:
        try:
            storage.devicetree.teardown_all()
        except Exception:  # pylint: disable=broad-except
            log_exception_info(log.error, "Failure tearing down device tree.")

    # Clear the exclusive disks to scan all devices in the system.
    if scan_all:
        STORAGE.get_proxy(DISK_SELECTION).SetExclusiveDisks([])

    # Do the reset, offering a retry on failure when allowed.
    while True:
        try:
            _reset_storage(storage)
        except StorageError as e:
            if not retry:
                # Retries are not allowed, give up.
                raise
            if error_handler.cb(e) == ERROR_RAISE:
                # The user doesn't want to retry, give up.
                raise
            # Otherwise, retry the storage reset.
        else:
            # Success, no need to retry.
            return
def test_evaluation_add_mount_options_report_only(proxy_getter, rule_data, ksdata_mock, storage_mock):
    """Report-only evaluation reports added mount options without applying them."""
    rules = [
        "part /tmp --mountoptions=nodev",
        "part / --mountoptions=noauto",
    ]
    messages = get_messages_for_partition_rules(
        rule_data, ksdata_mock, storage_mock, rules, 1, report_only=True
    )

    # two mount options added --> two info messages
    assert len(messages) == 2
    assert messages[0].type == common.MESSAGE_TYPE_INFO
    assert messages[1].type == common.MESSAGE_TYPE_INFO

    # newly added mount options should be mentioned in the messages
    # together with their mount points
    nodev_found = False
    noauto_found = False

    for message in messages:
        if "'nodev'" in message.text:
            assert "/tmp" in message.text
            nodev_found = True
        elif "'noauto'" in message.text:
            assert "/" in message.text
            noauto_found = True

    assert all([nodev_found, noauto_found])

    # no changes should be made
    device_tree_mock = STORAGE.get_proxy(DEVICE_TREE)
    device_tree_mock.SetDeviceMountOptions.assert_not_called()
def run(self):
    """Run the task."""
    proxy = STORAGE.get_proxy(BOOTLOADER)

    # Start from the existing arguments, dropping any crashkernel entry.
    extra = [
        arg for arg in proxy.ExtraArguments
        if not arg.startswith('crashkernel=')
    ]

    # Reserve the configured amount of memory for kdump.
    if self._kdump_enabled:
        # Ensure that the amount is an amount in MB.
        if self._reserved_memory[-1] != 'M':
            self._reserved_memory += 'M'

        extra.append('crashkernel=%s' % self._reserved_memory)

    # Enable fadump.
    if self._fadump_enabled and os.path.exists(FADUMP_CAPABLE_FILE):
        extra.append('fadump=on')

    proxy.SetExtraArguments(extra)
def write_configuration(overwrite=False):
    """Install network configuration to target system."""
    # Collect supported devices that are FCoE NICs.
    fcoe_nics = STORAGE.get_proxy(FCOE).GetNics()
    fcoe_ifaces = [
        device.device_name for device in get_supported_devices()
        if device.device_name in fcoe_nics
    ]

    network_proxy = NETWORK.get_proxy()

    def _run_network_task(task_path):
        # Resolve the task proxy and wait for the task to finish.
        sync_run_task(NETWORK.get_proxy(task_path))

    # Each task is created right before it runs to keep the original order.
    _run_network_task(network_proxy.ConfigureActivationOnBootWithTask(fcoe_ifaces))
    _run_network_task(network_proxy.InstallNetworkWithTask(overwrite))
    _run_network_task(network_proxy.ConfigureHostnameWithTask(overwrite))

    if conf.system.can_change_hostname:
        hostname = network_proxy.Hostname
        if hostname:
            network_proxy.SetCurrentHostname(hostname)
def refresh(self):
    """ The refresh method that is called every time the spoke is displayed.
        It should update the UI elements according to the contents of
        self.data.

        :see: pyanaconda.ui.common.UIObject.refresh
    """
    # Wait until the storage threads are done.
    for thread_name in [THREAD_EXECUTE_STORAGE, THREAD_STORAGE]:
        threadMgr.wait(thread_name)

    if not self._partitioning:
        # Create the partitioning now. It cannot be done earlier, because
        # the storage spoke would use it as a default partitioning.
        self._partitioning = create_partitioning(PARTITIONING_METHOD_BLIVET)
        self._device_tree = STORAGE.get_proxy(self._partitioning.GetDeviceTree())

    self._back_already_clicked = False

    self._client.initialize(self._partitioning.SendRequest)
    self._blivetgui.initialize()

    # if we re-enter blivet-gui spoke, actions from previous visit were
    # not removed, we need to update number of blivet-gui actions
    self._blivetgui.set_actions(self._client.get_actions())
def execute(self, payload):
    """Run the network configuration tasks of the Network DBus module."""
    # Collect supported devices that are FCoE NICs.
    fcoe_nics = STORAGE.get_proxy(FCOE).GetNics()
    fcoe_ifaces = [
        device.device_name for device in network.get_supported_devices()
        if device.device_name in fcoe_nics
    ]

    overwrite = network.can_overwrite_configuration(payload)
    network_proxy = NETWORK.get_proxy()

    def _run_network_task(task_path):
        # Resolve the task proxy and wait for the task to finish.
        sync_run_task(NETWORK.get_proxy(task_path))

    # Each task is created right before it runs to keep the original order.
    _run_network_task(network_proxy.ConfigureActivationOnBootWithTask(fcoe_ifaces))
    _run_network_task(network_proxy.InstallNetworkWithTask(overwrite))
    _run_network_task(network_proxy.ConfigureHostnameWithTask(overwrite))

    if conf.system.can_change_hostname:
        hostname = network_proxy.Hostname
        if hostname != network.DEFAULT_HOSTNAME:
            network_proxy.SetCurrentHostname(hostname)
def _rescan_devices(self):
    """Rescan devices after asking the user for a confirmation."""
    text = _("Warning: This will revert all changes done so far.\n"
             "Do you want to proceed?\n")

    question_window = YesNoDialog(text)
    ScreenHandler.push_screen_modal(question_window)

    # The user declined, keep everything as it is.
    if not question_window.answer:
        return

    # unset selected disks temporarily so that
    # storage_initialize() processes all devices
    disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    selected_disks = disk_select_proxy.SelectedDisks
    disk_select_proxy.SetSelectedDisks([])

    print(_("Scanning disks. This may take a moment..."))
    storage_initialize(self.storage, self.data, self.storage.protected_dev_names)

    # Restore the original disk selection.
    disk_select_proxy.SetSelectedDisks(selected_disks)

    # Drop the mount point assignments and regather the mount info.
    self._manual_part_proxy.SetMountPoints([])
    self._mount_info = self._gather_mount_info()
def evaluation_add_mount_options(rule_data, ksdata_mock, storage_mock, messages_evaluation_count):
    """Check that added mount options are reported and applied to the device tree."""
    rules = [
        "part /tmp --mountoptions=defaults,nodev",
        "part / --mountoptions=noauto",
    ]
    messages = get_messages_for_partition_rules(
        rule_data, ksdata_mock, storage_mock, rules, messages_evaluation_count
    )

    # two mount options added --> two info messages
    assert len(messages) == 2
    assert all(message.type == common.MESSAGE_TYPE_INFO for message in messages)

    # newly added mount options should be mentioned in the messages
    # together with their mount points
    nodev_found = False
    noauto_found = False

    for message in messages:
        if "'nodev'" in message.text:
            assert "/tmp" in message.text
            nodev_found = True
        elif "'noauto'" in message.text:
            assert "/" in message.text
            noauto_found = True

    assert all([nodev_found, noauto_found])

    # the options must have been pushed to the device tree
    device_tree_mock = STORAGE.get_proxy(DEVICE_TREE)
    device_tree_mock.SetDeviceMountOptions.assert_has_calls([
        mock.call("/dev/sda1", "defaults,nodev"),
        mock.call("/dev/sda2", "defaults,noauto"),
    ])
def on_start_clicked(self, *args): """ Go through the process of validating entry contents and then attempt to add the device. """ # First update widgets self._startButton.hide() self._cancelButton.set_sensitive(False) self._okButton.set_sensitive(False) self._set_configure_sensitive(False) self._conditionNotebook.set_current_page(1) # Get the input. device_name = self._deviceEntry.get_text().strip() wwpn_name = self._wwpnEntry.get_text().strip() lun_name = self._lunEntry.get_text().strip() # Get the discovery task. task_path = self._zfcp_proxy.DiscoverWithTask(device_name, wwpn_name, lun_name) task_proxy = STORAGE.get_proxy(task_path) # Start the discovery. async_run_task(task_proxy, self.process_result) self._spinner.start()
def _prepare_configuration(payload, ksdata):
    """Build the queue of tasks that configure the installed system.

    The queue covers DBus module configuration (subscription, security,
    timezone, services, localization, firewall), network configuration,
    user creation, addon execution, initramfs generation, FIPS setup,
    realm join, %post scripts, kexec setup and config/kickstart writing.
    The order of the sub-queues below is significant.

    :param payload: the payload instance used for the installation
    :param ksdata: the kickstart data object
    :return: a TaskQueue with the ordered configuration tasks
    """
    configuration_queue = TaskQueue("Configuration queue")

    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION, subscription_dbus_tasks)
        configuration_queue.append(subscription_config)

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    if is_module_available(TIMEZONE):
        timezone_proxy = TIMEZONE.get_proxy()
        timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Localization DBus module
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()
        localization_dbus_tasks = localization_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    if conf.target.can_configure_network:
        firewall_proxy = NETWORK.get_proxy(FIREWALL)
        firewall_dbus_task = firewall_proxy.InstallWithTask()
        os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.target.can_configure_network and conf.system.provides_network_config:
        # live payloads overwrite any existing network configuration
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    if is_module_available(USERS):
        user_config = TaskQueue("User creation", N_("Creating users"))
        users_proxy = USERS.get_proxy()
        users_dbus_tasks = users_proxy.InstallWithTasks()
        user_config.append_dbus_tasks(USERS, users_dbus_tasks)
        configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        # Run the bootloader module's initramfs DBus tasks synchronously.
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)

    # Configure FIPS.
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.ConfigureFIPSWithTask()])

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY, [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts, (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
def run_install_bootloader():
    """Run the DBus tasks that install the boot loader, one by one."""
    task_paths = bootloader_proxy.InstallBootloaderWithTasks(
        payload.type, payload.kernel_version_list)
    for task_path in task_paths:
        task_proxy = STORAGE.get_proxy(task_path)
        sync_run_task(task_proxy)
def _prepare_installation(payload, ksdata):
    """Build the queue of tasks that perform the installation.

    This takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for
    this are putting filesystems onto disks and installing packages onto
    those filesystems. Storage is configured either before or after the
    payload installation, depending on the payload type.

    :param payload: the payload instance used for the installation
    :param ksdata: the kickstart data object
    :return: a TaskQueue with the ordered installation tasks
    """
    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            # Log which threads are still alive, then block until they exit.
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    # DNF payloads get their storage configuration written early.
    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    # Set up FIPS for the payload installation.
    fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
    pre_install.append_dbus_tasks(SECURITY, [fips_task])

    # Install the payload.
    pre_install.append(
        Task("Find additional packages & run pre_install()",
             payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def run_install_bootloader():
        # Run the bootloader module's install DBus tasks synchronously.
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
def run_generate_initramfs():
    """Run the DBus tasks that regenerate the initramfs, one by one."""
    task_paths = bootloader_proxy.GenerateInitramfsWithTasks(
        payload.type, payload.kernel_version_list)
    for task_path in task_paths:
        task_proxy = STORAGE.get_proxy(task_path)
        sync_run_task(task_proxy)
# Collect all addon paths addon_paths = collect_addon_paths(constants.ADDON_PATHS) # If we were given a kickstart file on the command line, parse (but do not # execute) that now. Otherwise, load in defaults from kickstart files # shipped with the installation media. ksdata = startup_utils.parse_kickstart(opts, addon_paths, pass_to_boss=True) # Pick up any changes from interactive-defaults.ks that would # otherwise be covered by the dracut KS parser. from pyanaconda.modules.common.constants.services import STORAGE from pyanaconda.modules.common.constants.objects import BOOTLOADER bootloader_proxy = STORAGE.get_proxy(BOOTLOADER) if opts.extlinux: bootloader_proxy.SetBootloaderType(constants.BOOTLOADER_TYPE_EXTLINUX) if opts.leavebootorder: bootloader_proxy.SetKeepBootOrder(True) if opts.nombr: bootloader_proxy.SetKeepMBR(True) if ksdata.rescue.rescue: flags.rescue_mode = True # reboot with kexec if ksdata.reboot.kexec:
def _set_storage_boot_args(self, storage):
    """Set the storage-related boot arguments.

    Collect dracut arguments for the devices needed at boot time (root,
    stage2, swap, /usr, and any network storage they depend on) and add
    them to self.boot_args and self.dracut_args.

    :param storage: the Blivet storage object
    """
    fcoe_proxy = STORAGE.get_proxy(FCOE)
    iscsi_proxy = STORAGE.get_proxy(ISCSI)

    # FIPS
    boot_device = storage.mountpoints.get("/boot")
    if kernel_arguments.get("fips") == "1" and boot_device:
        self.boot_args.add("boot=%s" % self.stage2_device.fstab_spec)

    # Storage
    dracut_devices = [storage.root_device]
    if self.stage2_device != storage.root_device:
        dracut_devices.append(self.stage2_device)

    swap_devices = storage.fsset.swap_devices
    dracut_devices.extend(swap_devices)

    # Add resume= option to enable hibernation on x86.
    # Choose the largest swap device for that.
    if blivet.arch.is_x86() and swap_devices:
        resume_device = max(swap_devices, key=lambda x: x.size)
        self.boot_args.add("resume=%s" % resume_device.fstab_spec)

    # Does /usr have its own device? If so, we need to tell dracut
    usr_device = storage.mountpoints.get("/usr")
    if usr_device:
        dracut_devices.extend([usr_device])

    netdevs = [d for d in storage.devices
               if (getattr(d, "complete", True) and
                   isinstance(d, NetworkStorageDevice))]

    rootdev = storage.root_device
    if any(rootdev.depends_on(netdev) for netdev in netdevs):
        # Root is on network storage: include every mountpoint device
        # that depends on a network device as well.
        dracut_devices = set(dracut_devices)
        # By this time this thread should be the only one running, and also
        # mountpoints is a property function that returns a new dict every
        # time, so iterating over the values is safe.
        for dev in storage.mountpoints.values():
            if any(dev.depends_on(netdev) for netdev in netdevs):
                dracut_devices.add(dev)

    done = []

    for device in dracut_devices:
        for dep in storage.devices:
            if dep in done:
                continue

            if device != dep and not device.depends_on(dep):
                continue

            if isinstance(dep, blivet.devices.FcoeDiskDevice):
                setup_args = fcoe_proxy.GetDracutArguments(dep.nic)
            elif isinstance(dep, blivet.devices.iScsiDiskDevice):
                # (partial) offload devices do not need setup in dracut
                if not dep.offload:
                    node = _get_iscsi_node_from_device(dep)
                    setup_args = iscsi_proxy.GetDracutArguments(
                        Node.to_structure(node))
                # NOTE(review): for offloaded iSCSI devices setup_args is
                # not reassigned here, so it may carry over from a previous
                # loop iteration — TODO confirm this is intentional.
            else:
                setup_args = dep.dracut_setup_args()

            if not setup_args:
                continue

            self.boot_args.update(setup_args)
            self.dracut_args.update(setup_args)
            done.append(dep)

            # network configuration arguments
            if isinstance(dep, NetworkStorageDevice):
                network_proxy = NETWORK.get_proxy()
                network_args = []
                ibft = False
                nic = ""

                if isinstance(dep, blivet.devices.iScsiDiskDevice):
                    if dep.iface == "default" or ":" in dep.iface:
                        node = _get_iscsi_node_from_device(dep)
                        if iscsi_proxy.IsNodeFromIbft(
                                Node.to_structure(node)):
                            ibft = True
                        else:
                            nic = iface_for_host_ip(dep.host_address)
                    else:
                        nic = iscsi_proxy.GetInterface(dep.iface)
                else:
                    nic = dep.nic

                if nic or ibft:
                    network_args = network_proxy.GetDracutArguments(
                        nic, dep.host_address, "", ibft)

                self.boot_args.update(network_args)
                self.dracut_args.update(network_args)

    # This is needed for FCoE, bug #743784. The case:
    # We discover LUN on an iface which is part of multipath setup.
    # If the iface is disconnected after discovery anaconda doesn't
    # write dracut ifname argument for the disconnected iface path
    # (in NETWORK.GetDracutArguments).
    # Dracut needs the explicit ifname= because biosdevname
    # fails to rename the iface (because of BFS booting from it).
    for nic in fcoe_proxy.GetNics():
        hwaddr = get_interface_hw_address(nic)
        if hwaddr:
            self.boot_args.add("ifname=%s:%s" % (nic, hwaddr.lower()))

    # Add rd.iscsi.firmware to trigger dracut running iscsistart
    # See rhbz#1099603 and rhbz#1185792
    if len(glob("/sys/firmware/iscsi_boot*")) > 0:
        self.boot_args.add("rd.iscsi.firmware")
def configure_bootloader():
    """Run the DBus task that configures the boot loader."""
    task_path = bootloader_proxy.ConfigureWithTask(payload.kernel_version_list)
    task_proxy = STORAGE.get_proxy(task_path)
    sync_run_task(task_proxy)
# Run %pre scripts. startup_utils.run_pre_scripts(kspath) # Collect all addon paths from pyanaconda.addons import collect_addon_paths addon_paths = collect_addon_paths(constants.ADDON_PATHS) # Parse the kickstart file. ksdata = startup_utils.parse_kickstart(kspath, addon_paths, strict_mode=opts.ksstrict) # Pick up any changes from interactive-defaults.ks that would # otherwise be covered by the dracut KS parser. from pyanaconda.modules.common.constants.services import STORAGE from pyanaconda.modules.common.constants.objects import BOOTLOADER bootloader_proxy = STORAGE.get_proxy(BOOTLOADER) if opts.leavebootorder: bootloader_proxy.SetKeepBootOrder(True) if opts.nombr: bootloader_proxy.SetKeepMBR(True) if ksdata.rescue.rescue: flags.rescue_mode = True # reboot with kexec if ksdata.reboot.kexec: flags.kexec = True # Change the logging configuration based on the kickstart.
def reset_bootloader():
    """Unset the boot loader drive via the storage DBus module."""
    proxy = STORAGE.get_proxy(BOOTLOADER)
    proxy.SetDrive(BOOTLOADER_DRIVE_UNSET)
def post_install(self):
    """Run OSTree-specific post-installation steps.

    Re-adds the OSTree remote to the deployment's /etc, relocates the
    GRUB2 config under /boot/loader (with a compatibility symlink), and
    hands the storage-derived kernel arguments over to OSTree via
    `ostree admin instutil set-kargs`.
    """
    super().post_install()

    data = self._get_source_configuration()

    gi.require_version("OSTree", "1.0")
    from gi.repository import OSTree
    cancellable = None

    # Following up on the "remote delete" above, we removed the
    # remote from /ostree/repo/config. But we want it in /etc, so
    # re-add it to /etc/ostree/remotes.d, using the sysroot path.
    #
    # However, we ignore the case where the remote already exists,
    # which occurs when the content itself provides the remote
    # config file.
    #
    # Note here we use the deployment as sysroot, because it's
    # that version of /etc that we want.
    sysroot_file = Gio.File.new_for_path(conf.target.system_root)
    sysroot = OSTree.Sysroot.new(sysroot_file)
    sysroot.load(cancellable)
    repo = sysroot.get_repo(None)[1]
    repo.remote_change(sysroot_file,
                       OSTree.RepoRemoteChange.ADD_IF_NOT_EXISTS,
                       data.remote, data.url,
                       Variant('a{sv}', self._remoteOptions),
                       cancellable)

    boot = conf.target.system_root + '/boot'

    # If we're using GRUB2, move its config file, also with a
    # compatibility symlink.
    boot_grub2_cfg = boot + '/grub2/grub.cfg'
    if os.path.isfile(boot_grub2_cfg):
        boot_loader = boot + '/loader'
        target_grub_cfg = boot_loader + '/grub.cfg'
        log.info("Moving %s -> %s", boot_grub2_cfg, target_grub_cfg)
        os.rename(boot_grub2_cfg, target_grub_cfg)
        os.symlink('../loader/grub.cfg', boot_grub2_cfg)

    # Skip kernel args setup for dirinstall, there is no bootloader or rootDevice setup.
    if not conf.target.is_directory:
        # OSTree owns the bootloader configuration, so here we give it
        # the argument list we computed from storage, architecture and
        # such.
        bootloader = STORAGE.get_proxy(BOOTLOADER)
        device_tree = STORAGE.get_proxy(DEVICE_TREE)

        root_name = device_tree.GetRootDevice()
        root_data = DeviceData.from_structure(
            device_tree.GetDeviceData(root_name))

        set_kargs_args = ["admin", "instutil", "set-kargs"]
        set_kargs_args.extend(bootloader.GetArguments())
        set_kargs_args.append("root=" + device_tree.GetFstabSpec(root_name))

        # Btrfs subvolume roots additionally need the subvolume flag.
        if root_data.type == "btrfs subvolume":
            set_kargs_args.append("rootflags=subvol=" + root_name)

        self._safe_exec_with_redirect("ostree", set_kargs_args,
                                      root=conf.target.system_root)
def __str__(self):
    """Return the kickstart text generated by the Storage DBus module."""
    proxy = STORAGE.get_proxy()
    return proxy.GenerateKickstart()
def doInstall(storage, payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for
    this are putting filesystems onto disks and installing packages onto
    those filesystems. The whole flow is expressed as a TaskQueue that is
    built first and then started at the end of this function.

    :param storage: an instance of the Blivet's storage object
    :param payload: the payload instance used for the installation
    :param ksdata: the kickstart data object
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            # Log which threads are still alive, then block until they exit.
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata",
             task=update_storage_ksdata,
             task_args=(storage, ksdata)))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        resize_format_pre=message_clbk,
        wait_for_entropy=entropy_wait_clbk)

    if not conf.target.is_directory:
        early_storage.append(
            Task("Activate filesystems",
                 task=turn_on_filesystems,
                 task_args=(storage, ),
                 task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(
        Task("Mount filesystems", task=storage.mount_filesystems))

    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(
            Task("Write early storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    realm_discover = TaskQueue("Realm discover",
                               N_("Discovering realm to join"))
    realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
    installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authselect", ksdata.authselect.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))

    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are
        needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.realm.packages,
                                          reason="realm")
        payload.requirements.add_packages(ksdata.authselect.packages,
                                          reason="authselect")
        payload.requirements.add_packages(ksdata.firewall.packages,
                                          reason="firewall")
        payload.requirements.add_packages(ksdata.network.packages,
                                          reason="network")
        payload.requirements.add_packages(ksdata.timezone.packages,
                                          reason="ntp", strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages,
                                              reason="bootloader")

        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks", strong=False)
        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()",
             run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        late_storage.append(
            Task("Prepare mount targets",
                 task=payload.prepare_mount_targets,
                 task_args=(storage, )))

        if not conf.target.is_directory:
            late_storage.append(
                Task("Write late storage",
                     task=write_storage_configuration,
                     task_args=(storage, )))

        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", write_boot_loader,
                 (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests,
                                           SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(
            Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(installation_queue.task_count)

    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(installation_queue.queue_count)
    task_started_counter = util.item_counter(installation_queue.task_count)
    task_completed_counter = util.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))

    # start the task queue
    installation_queue.start()

    # done
    progress_complete()
# Collect all addon paths addon_paths = collect_addon_paths(constants.ADDON_PATHS) # If we were given a kickstart file on the command line, parse (but do not # execute) that now. Otherwise, load in defaults from kickstart files # shipped with the installation media. ksdata = startup_utils.parse_kickstart(opts, addon_paths, pass_to_boss=True) # Pick up any changes from interactive-defaults.ks that would # otherwise be covered by the dracut KS parser. from pyanaconda.modules.common.constants.services import STORAGE from pyanaconda.modules.common.constants.objects import BOOTLOADER bootloader_proxy = STORAGE.get_proxy(BOOTLOADER) if bootloader_proxy.BootloaderType == constants.BOOTLOADER_TYPE_EXTLINUX: flags.extlinux = True if ksdata.rescue.rescue: flags.rescue_mode = True # reboot with kexec if ksdata.reboot.kexec: flags.kexec = True # Some kickstart commands must be executed immediately, as they affect # how anaconda operates. ksdata.logging.execute()
def _prepare_installation(storage, payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param storage: the Blivet storage object to partition and mount
    :param payload: the payload (package/image source) to install
    :param ksdata: kickstart data prepared by the UI
    :return: the assembled (but not yet started) installation TaskQueue
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    # bootloader installation is skipped for directory installs or when disabled
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))
    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        # NOTE(review): local name misspells "threads"; kept as-is here.
        def wait_for_all_treads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a plain top-level task, not a whole queue
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))
    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata",
             task=update_storage_ksdata,
             task_args=(storage, ksdata)))
    # callbacks for blivet: forward progress messages and entropy waits to the UI
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        resize_format_pre=message_clbk,
        wait_for_entropy=entropy_wait_clbk)
    if not conf.target.is_directory:
        early_storage.append(
            Task("Activate filesystems",
                 task=turn_on_filesystems,
                 task_args=(storage, ),
                 task_kwargs={"callbacks": callbacks_reg}))
    early_storage.append(
        Task("Mount filesystems", task=storage.mount_filesystems))
    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(
            Task("Write early storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))
    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))
    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any)
        are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.timezone.packages,
                                          reason="ntp",
                                          strong=False)
        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages,
                                              reason="bootloader")
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")
        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)
        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [SECURITY, NETWORK]
        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)
        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration and not conf.target.is_directory:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        late_storage.append(
            Task("Write late storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))
        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", write_boot_loader, (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)
    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests,
                                           SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(
            Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    return installation_queue
def _set_extra_boot_args(self):
    """Merge the bootloader's extra arguments into the boot args."""
    proxy = STORAGE.get_proxy(BOOTLOADER)
    extra_args = proxy.ExtraArguments
    self.boot_args.update(extra_args)
def _prepare_installation(payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the payload (package/image source) to install
    :param ksdata: kickstart data prepared by the UI
    :return: the assembled (but not yet started) installation TaskQueue
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))
    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        # NOTE(review): local name misspells "threads"; kept as-is here.
        def wait_for_all_treads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a plain top-level task, not a whole queue
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    # the storage argument is no longer passed to addons here, hence None
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))
    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())
    if payload.needs_storage_configuration:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    timezone_proxy = TIMEZONE.get_proxy()
    ntp_excluded = timezone.NTP_PACKAGE in ksdata.packages.excludedList
    pre_install.append_dbus_tasks(
        TIMEZONE,
        [timezone_proxy.ConfigureNTPServiceEnablementWithTask(ntp_excluded)])
    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))
    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any)
        are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")
        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)
        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [
            SECURITY, NETWORK, TIMEZONE, STORAGE
        ]
        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)
        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def configure_bootloader():
        # run the DBus configuration task synchronously with the kernel list
        boot_task = bootloader_proxy.ConfigureWithTask(
            payload.kernel_version_list)
        sync_run_task(STORAGE.get_proxy(boot_task))

    if not payload.handles_bootloader_configuration:
        # FIXME: This is a temporary workaround, run the DBus task directly.
        bootloader_install.append(
            Task("Configure the bootloader", configure_bootloader))
    bootloader_install.append_dbus_tasks(STORAGE,
                                         [bootloader_proxy.InstallWithTask()])
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)
    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
def _prepare_configuration(payload, ksdata):
    """Configure the installed system.

    Build (but do not start) the queue of post-installation configuration
    tasks: DBus module installation tasks, network configuration, user
    creation, addon execution, initramfs regeneration, realm join, post
    scripts, kexec setup and config/kickstart persistence.

    :param payload: the installed payload (used for initrd regeneration etc.)
    :param ksdata: kickstart data prepared by the UI
    :return: the assembled configuration TaskQueue
    """
    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)
    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)
    # add installation tasks for the Timezone DBus module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)
    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)
    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        # live images may already carry network config that must be overwritten
        overwrite = isinstance(payload, LiveImagePayload)
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    # BUG FIX: these tasks were previously appended to os_config (already
    # scheduled above), which left user_config empty and ran user creation
    # under the wrong queue and before network configuration.
    user_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))
    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))
    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    if isinstance(payload, LiveImagePayload):
        btrfs_task = bootloader_proxy.FixBTRFSWithTask(
            payload.kernel_version_list)
        generate_initramfs.append_dbus_tasks(STORAGE, [btrfs_task])
    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    zipl_task = bootloader_proxy.FixZIPLWithTask()
    generate_initramfs.append_dbus_tasks(STORAGE, [zipl_task])
    configuration_queue.append(generate_initramfs)

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))
    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))
    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system.

    Builds the configuration TaskQueue from the ksdata execute methods,
    then runs it synchronously with progress reporting.

    :param storage: the Blivet storage object of the installed system
    :param payload: the installed payload
    :param ksdata: kickstart data prepared by the UI
    """
    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))
    os_config.append(Task("Configure SELinux", ksdata.selinux.execute))
    os_config.append(
        Task("Configure first boot tasks", ksdata.firstboot.execute))
    os_config.append(Task("Configure services", ksdata.services.execute))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))
    os_config.append(Task("Configure language", ksdata.lang.execute))
    os_config.append(Task("Configure firewall", ksdata.firewall.execute))
    os_config.append(Task("Configure X", ksdata.xconfig.execute))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(
        Task("Configure root", ksdata.rootpw.execute, (storage, ksdata, u)))
    user_config.append(
        Task("Configure user groups", ksdata.group.execute,
             (storage, ksdata, u)))
    user_config.append(
        Task("Configure user", ksdata.user.execute, (storage, ksdata, u)))
    user_config.append(
        Task("Configure SSH key", ksdata.sshkey.execute,
             (storage, ksdata, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))
    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))
    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))
    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue(
            "Realm join",
            N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))
    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generate image or directory contents.
    if conf.target.is_image:
        log.info(
            "Not writing out user interaction config file due to image install mode."
        )
    elif conf.target.is_directory:
        log.info(
            "Not writing out user interaction config file due to directory install mode."
        )
    else:
        write_configs.append(
            Task("Store user interaction config",
                 screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)
    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system.

    Variant that delegates several configuration steps to DBus module
    installation tasks (Security, Services, Localization, Firewall, Users),
    each run synchronously via sync_run_task, then runs the queue with
    progress reporting.

    :param storage: the Blivet storage object of the installed system
    :param payload: the installed payload
    :param ksdata: kickstart data prepared by the UI
    """
    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in security_dbus_tasks:
        task_proxy = SECURITY.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in services_dbus_tasks:
        task_proxy = SERVICES.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks(
        util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in localization_dbus_tasks:
        task_proxy = LOCALIZATION.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask(util.getSysroot())
    task_proxy = NETWORK.get_proxy(firewall_dbus_task)
    os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in users_dbus_tasks:
        task_proxy = USERS.get_proxy(dbus_task)
        user_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, None, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))
    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))
    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))
    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue(
            "Realm join",
            N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))
    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)
    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()