def collect_remote_requirements():
    """Collect requirements of the DBus modules.

    :return: a list of requirements
    """
    # Ask the Boss service for the aggregated requirement structures
    # and deserialize them into Requirement objects.
    structures = BOSS.get_proxy().CollectRequirements()
    return Requirement.from_structure_list(structures)
def collect_language_requirements_test(self, proxy_getter):
    """Test the function collect_language_requirements."""
    boss = BOSS.get_proxy()
    boss.GetModules.return_value = [LOCALIZATION.service_name]

    proxy = LOCALIZATION.get_proxy()
    proxy.Language = "cs_CZ.UTF-8"
    proxy.LanguageSupport = ["en_GB.UTF-8", "sr_RS@cyrilic"]

    # Langpack packages reported as available by the mocked DNF base.
    names = [
        "langpacks-cs", "langpacks-core-cs", "langpacks-core-font-cs",
        "langpacks-en", "langpacks-en_GB", "langpacks-core-en",
        "langpacks-core-en_GB", "langpacks-core-font-en",
    ]
    packages = [self._create_package(name) for name in names]

    base = Mock()
    base.sack.query.return_value.available.return_value.filter.return_value = packages

    with self.assertLogs(level="WARNING") as cm:
        requirements = collect_language_requirements(base)

    # Only the exact locale matches are turned into requirements.
    expected = [
        self._create_requirement(
            "langpacks-cs", "Required to support the locale 'cs_CZ.UTF-8'."),
        self._create_requirement(
            "langpacks-en_GB", "Required to support the locale 'en_GB.UTF-8'."),
    ]
    self._compare_requirements(requirements, expected)

    # The unmatched locale is reported as a warning.
    msg = "Selected locale 'sr_RS@cyrilic' does not match any available langpacks."
    self.assertTrue(any(msg in output for output in cm.output))
def test_collect_language_requirements(self, proxy_getter):
    """Test the function collect_language_requirements."""
    boss = BOSS.get_proxy()
    boss.GetModules.return_value = [LOCALIZATION.service_name]

    proxy = LOCALIZATION.get_proxy()
    proxy.Language = "cs_CZ.UTF-8"
    proxy.LanguageSupport = ["en_GB.UTF-8", "sr_RS@cyrilic"]

    # Langpack packages reported as available by the mocked DNF manager.
    dnf_manager = Mock(spec=DNFManager)
    dnf_manager.match_available_packages.return_value = [
        "langpacks-cs",
        "langpacks-core-cs",
        "langpacks-core-font-cs",
        "langpacks-en",
        "langpacks-en_GB",
        "langpacks-core-en",
        "langpacks-core-en_GB",
        "langpacks-core-font-en",
    ]

    with self.assertLogs(level="WARNING") as cm:
        requirements = collect_language_requirements(dnf_manager)

    # Only the exact locale matches are turned into requirements.
    expected = [
        self._create_requirement(
            "langpacks-cs", "Required to support the locale 'cs_CZ.UTF-8'."),
        self._create_requirement(
            "langpacks-en_GB", "Required to support the locale 'en_GB.UTF-8'."),
    ]
    self._compare_requirements(requirements, expected)

    # The unmatched locale is reported as a warning.
    msg = "Selected locale 'sr_RS@cyrilic' does not match any available langpacks."
    assert any(msg in output for output in cm.output)
def parseKickstart(handler, f, strict_mode=False, pass_to_boss=False):
    """Parse the given kickstart file into the handler.

    Pykickstart warnings raised during the parsing are both printed with
    the default warning printer and collected; in the strict mode any
    collected warning is turned into a fatal error. On a kickstart error
    the installer prints a report and terminates the process.

    :param handler: a kickstart data handler that receives the parsed data
    :param f: a path to the kickstart file
    :param strict_mode: if True, treat pykickstart warnings as fatal
    :param pass_to_boss: if True, let the DBus modules read the file first
    """
    # preprocessing the kickstart file has already been handled in initramfs.
    ksparser = AnacondaKSParser(handler)

    # Collected KickstartParseWarning messages.
    kswarnings = []
    # Keep the default printer so the warnings are still shown to the user.
    showwarning = warnings.showwarning

    def ksshowwarning(message, category, filename, lineno, file=None, line=None):
        # Print the warning with default function.
        showwarning(message, category, filename, lineno, file, line)
        # Collect pykickstart warnings.
        if issubclass(category, KickstartParseWarning):
            kswarnings.append(message)

    try:
        # Process warnings differently in this part.
        with warnings.catch_warnings():
            # Set up the warnings module.
            warnings.showwarning = ksshowwarning
            warnings.simplefilter("always", category=KickstartParseWarning)

            # Parse the kickstart file in DBus modules.
            if pass_to_boss:
                boss = BOSS.get_proxy()

                report = KickstartReport.from_structure(
                    boss.ReadKickstartFile(f)
                )

                # Re-emit the module warnings through the local filter
                # so they are printed and collected like local ones.
                for warn in report.warning_messages:
                    warnings.warn(warn.message, KickstartParseWarning)

                if not report.is_valid():
                    message = "\n\n".join(map(str, report.error_messages))
                    raise KickstartError(message)

            # Parse the kickstart file in anaconda.
            ksparser.readKickstart(f)

            # Process pykickstart warnings in the strict mode:
            if strict_mode and kswarnings:
                raise KickstartError("Please modify your kickstart file to fix the warnings "
                                     "or remove the `ksstrict` option.")

    except KickstartError as e:
        # We do not have an interface here yet, so we cannot use our error
        # handling callback.
        parsing_log.error(e)

        # Print kickstart warnings in the strict mode.
        if strict_mode and kswarnings:
            print(_("\nSome warnings occurred during reading the kickstart file:"))
            for w in kswarnings:
                print(str(w).strip())

        # Print an error and terminate.
        print(_("\nAn error occurred during reading the kickstart file:"
                "\n%s\n\nThe installer will now terminate.") % str(e).strip())

        util.ipmi_report(IPMI_ABORTED)
        time.sleep(10)
        sys.exit(1)
def _set_password_policies(self, policies):
    """Set the password policies for the installer.

    :param policies: a dictionary of password policies
    """
    # Publish the policies on the user interface module of the Boss.
    ui_proxy = BOSS.get_proxy(USER_INTERFACE)
    ui_proxy.PasswordPolicies = PasswordPolicy.to_structure_dict(policies)
def is_module_available(module_service_identifier):
    """Check if the module appears to be running.

    :param module_service_identifier: module service identifier to check
    :type module_service_identifier: DBusServiceIdentifier instance
    :return: True if module is running, False otherwise
    :rtype: bool
    """
    # The Boss service reports the names of the started module services.
    running_modules = BOSS.get_proxy().GetModules()
    return module_service_identifier.service_name in running_modules
def set_dbus_defaults():
    """Give the mocked DBus proxies sensible default values for the tests."""
    boss = BOSS.get_proxy()
    boss.GetModules.return_value = [
        KDUMP.service_name
    ]

    kdump_proxy = KDUMP.get_proxy()
    kdump_proxy.KdumpEnabled = True

    ui_proxy = BOSS.get_proxy(USER_INTERFACE)
    ui_proxy.PasswordPolicies = {}

    network_proxy = NETWORK.get_proxy()
    network_proxy.Connected.return_value = True

    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_proxy.EnabledServices = []
    firewall_proxy.DisabledServices = []
    firewall_proxy.EnabledPorts = []
    firewall_proxy.Trusts = []

    device_tree_proxy = STORAGE.get_proxy(DEVICE_TREE)
    device_tree_proxy.GetDeviceMountOptions.return_value = "defaults"
    device_tree_proxy.GetMountPoints.return_value = {}

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_proxy.IsPasswordSet = False

    users_proxy = USERS.get_proxy()
    users_proxy.IsRootPasswordSet = True
    users_proxy.IsRootPasswordCrypted = False
    users_proxy.RootPassword = "******"

    # Pretend there is one active DNF payload with an empty selection.
    payloads_proxy = PAYLOADS.get_proxy()
    payloads_proxy.ActivePayload = "/fake/payload/1"

    dnf_proxy = PAYLOADS.get_proxy("/fake/payload/1")
    dnf_proxy.Type = PAYLOAD_TYPE_DNF
    dnf_proxy.PackagesSelection = PackagesSelectionData.to_structure(
        PackagesSelectionData())
def collect_remote_requirements_test(self, proxy_getter):
    """Test the function collect_remote_requirements."""
    # The requirements the Boss is going to report.
    expected = [
        self._create_requirement("a", "Required by A."),
        self._create_requirement("b", "Required by B."),
        self._create_requirement("c", "Required by C."),
    ]

    boss = BOSS.get_proxy()
    boss.CollectRequirements.return_value = \
        Requirement.to_structure_list(expected)

    # The collected requirements have to match the reported ones.
    requirements = collect_remote_requirements()
    self._compare_requirements(requirements, expected)
def get_policy(policy_name) -> PasswordPolicy:
    """Get the password policy data.

    :param policy_name: a name of the policy
    :return: a password policy data
    """
    proxy = BOSS.get_proxy(USER_INTERFACE)
    policies = PasswordPolicy.from_structure_dict(proxy.PasswordPolicies)

    try:
        return policies[policy_name]
    except KeyError:
        # Fall back to the built-in defaults for an unknown policy.
        return PasswordPolicy.from_defaults(policy_name)
def _get_password_policies(self):
    """Get the password policies from the installer.

    :return: a dictionary of password policies
    """
    ui_proxy = BOSS.get_proxy(USER_INTERFACE)
    policies = PasswordPolicy.from_structure_dict(ui_proxy.PasswordPolicies)

    # Make sure the root policy is always present.
    if PASSWORD_POLICY_ROOT not in policies:
        policies[PASSWORD_POLICY_ROOT] = \
            PasswordPolicy.from_defaults(PASSWORD_POLICY_ROOT)

    return policies
def _wait_for_modules(self, timeout):
    """Wait for the modules to start.

    Poll the Boss service once a second until all modules report
    availability or the countdown runs out.

    :param timeout: the number of seconds to wait
    :raise TimeoutError: if the modules are not available in time
    """
    boss = BOSS.get_proxy()

    while not boss.AllModulesAvailable and timeout > 0:
        log.info("Waiting %d sec for modules to be started.", timeout)
        time.sleep(1)
        timeout = timeout - 1

    # Re-check availability instead of the counter: the modules may have
    # become available during the very last iteration of the countdown,
    # in which case the original `if not timeout:` check would raise a
    # spurious TimeoutError.
    if not boss.AllModulesAvailable:
        log.error("Waiting for modules to be started timed out.")
        raise TimeoutError(
            "Anaconda DBus modules failed to start on time.")
def run_boss(kickstart_modules=None, addons_enabled=True):
    """Start Boss service on DBus.

    :param kickstart_modules: a list of service identifiers
    :param addons_enabled: should we start the addons?
    """
    if kickstart_modules is None:
        kickstart_modules = ALL_KICKSTART_MODULES

    # Ask the bus daemon to activate the Boss service.
    DBus.get_dbus_proxy().StartServiceByName(BOSS.service_name, DBUS_FLAG_NONE)

    # Tell the Boss which module services to start.
    service_names = [module.service_name for module in kickstart_modules]
    BOSS.get_proxy().StartModules(service_names, addons_enabled)
def _load_kickstart(self):
    """Load the kickstart"""
    from pyanaconda import kickstart

    # Construct a commandMap with only the supported Anaconda's commands
    commandMap = dict(
        (k, kickstart.commandMap[k]) for k in SUPPORTED_KICKSTART_COMMANDS)

    # Prepare new data object
    self.data = kickstart.AnacondaKSHandler(self._addon_module_paths["ks"],
                                            commandUpdates=commandMap)

    # Prefer the kickstart produced by a previous Initial Setup run.
    kickstart_path = INPUT_KICKSTART_PATH
    if os.path.exists(OUTPUT_KICKSTART_PATH):
        log.info("using kickstart from previous run for input")
        kickstart_path = OUTPUT_KICKSTART_PATH

    log.info("parsing input kickstart %s", kickstart_path)
    try:
        # Read the installed kickstart
        parser = kickstart.AnacondaKSParser(self.data)
        parser.readKickstart(kickstart_path)
        log.info("kickstart parsing done")
    except pykickstart.errors.KickstartError as kserr:
        log.critical("kickstart parsing failed: %s", kserr)
        log.critical(
            "Initial Setup startup failed due to invalid kickstart file")
        raise InitialSetupError

    # if we got this far the kickstart should be valid, so send it to Boss as well
    boss = BOSS.get_proxy()
    report = KickstartReport.from_structure(
        boss.ReadKickstartFile(kickstart_path))

    # Abort the startup if the DBus modules rejected the kickstart.
    if not report.is_valid():
        message = "\n\n".join(map(str, report.error_messages))
        raise InitialSetupError(message)

    if self.external_reconfig:
        # set the reconfig flag in kickstart so that
        # relevant spokes show up
        services_proxy = SERVICES.get_proxy()
        services_proxy.SetSetupOnBoot(SETUP_ON_BOOT_RECONFIG)

    # Record if groups, users or root password has been set before Initial Setup
    # has been started, so that we don't trample over existing configuration.
    users_proxy = USERS.get_proxy()
    self._groups_already_configured = bool(users_proxy.Groups)
    self._users_already_configured = bool(users_proxy.Users)
    self._root_password_already_configured = users_proxy.IsRootPasswordSet
def _load_kickstart(self):
    """Load the kickstart"""
    from pyanaconda import kickstart

    # Construct a commandMap with only the supported Anaconda's commands
    commandMap = dict(
        (k, kickstart.commandMap[k]) for k in SUPPORTED_KICKSTART_COMMANDS)

    # Prepare new data object
    self.data = kickstart.AnacondaKSHandler(self._addon_module_paths["ks"],
                                            commandUpdates=commandMap)

    # Prefer the kickstart produced by a previous Initial Setup run.
    kickstart_path = INPUT_KICKSTART_PATH
    if os.path.exists(OUTPUT_KICKSTART_PATH):
        log.info("using kickstart from previous run for input")
        kickstart_path = OUTPUT_KICKSTART_PATH

    log.info("parsing input kickstart %s", kickstart_path)
    try:
        # Read the installed kickstart
        parser = kickstart.AnacondaKSParser(self.data)
        parser.readKickstart(kickstart_path)
        log.info("kickstart parsing done")
    except pykickstart.errors.KickstartError as kserr:
        log.critical("kickstart parsing failed: %s", kserr)
        log.critical(
            "Initial Setup startup failed due to invalid kickstart file")
        raise InitialSetupError

    # if we got this far the kickstart should be valid, so send it to Boss as well
    boss = BOSS.get_proxy()
    boss.SplitKickstart(kickstart_path)

    # Distribute the split kickstart to the module services; abort the
    # startup if any module reports an error.
    errors = boss.DistributeKickstart()

    if errors:
        message = "\n\n".join("{error_message}".format_map(e) for e in errors)
        raise InitialSetupError(message)

    if self.external_reconfig:
        # set the reconfig flag in kickstart so that
        # relevant spokes show up
        services_proxy = SERVICES.get_proxy()
        services_proxy.SetSetupOnBoot(SETUP_ON_BOOT_RECONFIG)
def wait_for_modules(timeout=600):
    """Wait for the DBus modules.

    Poll the Boss service once a second until all modules report
    availability or the countdown runs out.

    :param timeout: seconds to the timeout
    :return: True if the modules are ready, otherwise False
    """
    boss = BOSS.get_proxy()

    while not boss.AllModulesAvailable and timeout > 0:
        log.info("Waiting %d sec for modules to be started.", timeout)
        time.sleep(1)
        timeout = timeout - 1

    # Re-check availability instead of the counter: the modules may have
    # become available during the very last iteration of the countdown,
    # in which case the original `if not timeout:` check would report a
    # spurious failure.
    if not boss.AllModulesAvailable:
        log.error("Waiting for modules to be started timed out.")
        return False

    return True
def test_evaluation_passwd_minlen_report_only_not_ignored(
        proxy_getter, rule_data, ksdata_mock, storage_mock):
    users_proxy = USERS.get_proxy()
    users_proxy.IsRootPasswordCrypted = False
    users_proxy.RootPassword = "******"

    rule_data.new_rule("passwd --minlen=8")

    # call eval_rules with report_only=False
    # should set password minimal length to 8
    messages = rule_data.eval_rules(ksdata_mock, storage_mock,
                                    report_only=False)

    # Password Policy changed --> no warnings
    assert not messages
    assert rule_data._passwd_rules._orig_minlen == 6
    assert not rule_data._passwd_rules._orig_strict
    assert rule_data._passwd_rules._minlen == 8

    # The root policy published on the UI module has to reflect the rule.
    expected_policy = PasswordPolicy.from_defaults(PASSWORD_POLICY_ROOT)
    expected_policy.min_length = 8
    expected_policy.is_strict = True
    expected_policies = {PASSWORD_POLICY_ROOT: expected_policy}

    ui_mock = BOSS.get_proxy(USER_INTERFACE)
    assert ui_mock.PasswordPolicies == \
        PasswordPolicy.to_structure_dict(expected_policies)

    # call of eval_rules with report_only=True
    # should not change anything
    messages = rule_data.eval_rules(ksdata_mock, storage_mock,
                                    report_only=True)

    # Password Policy stayed the same --> no warnings
    assert not messages
    assert rule_data._passwd_rules._orig_minlen == 6
    assert not rule_data._passwd_rules._orig_strict
    assert rule_data._passwd_rules._minlen == 8

    assert ui_mock.PasswordPolicies == \
        PasswordPolicy.to_structure_dict(expected_policies)
def apply_password_policy_from_kickstart(data):
    """Apply the password policy specified in the kickstart file.

    FIXME: This is a temporary workaround. Remove the pwpolicy
    kickstart command in the next major release.

    :param data: a kickstart data handler
    """
    if not data.anaconda.pwpolicy.seen:
        log.debug("Using the password policy from the configuration.")
        return

    # Build the policies from the pwpolicy kickstart data.
    policies = {}

    for pwdata in data.anaconda.pwpolicy.policyList:
        policy = PasswordPolicy()
        policy.min_quality = pwdata.minquality
        policy.min_length = pwdata.minlen
        policy.is_strict = pwdata.strict
        policy.allow_empty = pwdata.emptyok
        policies[pwdata.name] = policy

    # Set up the UI DBus module.
    ui_module = BOSS.get_proxy(USER_INTERFACE)
    ui_module.SetPasswordPolicies(PasswordPolicy.to_structure_dict(policies))

    # Set up the Anaconda configuration. This change will affect only the main
    # process with UI, because the DBus modules are already running.
    root_data = data.anaconda.pwpolicy.get_policy(PASSWORD_POLICY_ROOT,
                                                  fallback_to_default=True)
    conf.ui._set_option("can_change_root", root_data.changesok)

    user_data = data.anaconda.pwpolicy.get_policy(PASSWORD_POLICY_USER,
                                                  fallback_to_default=True)
    conf.ui._set_option("can_change_users", user_data.changesok)

    log.debug("Using the password policy from the kickstart file.")
def _apply(self):
    """Apply the configuration collected by Initial Setup to the system.

    Runs the DBus configuration tasks (timezone, localization, hostname,
    groups, users, root password), executes the addons, writes the output
    kickstart and clears the reconfig trigger files.
    """
    # Do not execute sections that were part of the original
    # anaconda kickstart file (== have .seen flag set)
    log.info("applying changes")

    services_proxy = SERVICES.get_proxy()
    reconfig_mode = services_proxy.SetupOnBoot == SETUP_ON_BOOT_RECONFIG

    # data.selinux
    # data.firewall

    # Configure the timezone.
    timezone_proxy = TIMEZONE.get_proxy()
    for task_path in timezone_proxy.InstallWithTasks():
        task_proxy = TIMEZONE.get_proxy(task_path)
        sync_run_task(task_proxy)

    # Configure the localization.
    localization_proxy = LOCALIZATION.get_proxy()
    for task_path in localization_proxy.InstallWithTasks():
        task_proxy = LOCALIZATION.get_proxy(task_path)
        sync_run_task(task_proxy)

    # Configure persistent hostname
    network_proxy = NETWORK.get_proxy()
    network_task = network_proxy.ConfigureHostnameWithTask(True)
    task_proxy = NETWORK.get_proxy(network_task)
    sync_run_task(task_proxy)
    # Set current hostname
    network_proxy.SetCurrentHostname(network_proxy.Hostname)

    # Configure groups, users & root account
    #
    # NOTE: We only configure groups, users & root account if the respective
    #       kickstart commands are *not* seen in the input kickstart.
    #       This basically means that we will configure only what was
    #       set in the Initial Setup UI and will not attempt to configure
    #       anything that looks like it was configured previously in
    #       the Anaconda UI or installation kickstart.
    users_proxy = USERS.get_proxy()

    if self._groups_already_configured and not reconfig_mode:
        log.debug("skipping user group configuration - already configured")
    elif users_proxy.Groups:
        # only run if there are some groups to create
        groups_task = users_proxy.ConfigureGroupsWithTask()
        task_proxy = USERS.get_proxy(groups_task)
        log.debug("configuring user groups via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    if self._users_already_configured and not reconfig_mode:
        log.debug("skipping user configuration - already configured")
    elif users_proxy.Users:
        # only run if there are some users to create
        users_task = users_proxy.ConfigureUsersWithTask()
        task_proxy = USERS.get_proxy(users_task)
        log.debug("configuring users via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    if self._root_password_already_configured and not reconfig_mode:
        log.debug(
            "skipping root password configuration - already configured")
    else:
        root_task = users_proxy.SetRootPasswordWithTask()
        task_proxy = USERS.get_proxy(root_task)
        log.debug("configuring root password via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    # Configure all addons
    log.info("executing addons")
    self.data.addons.execute(storage=None, ksdata=self.data, users=None,
                             payload=None)

    # Run the system installation task of the Boss.
    boss_proxy = BOSS.get_proxy()
    task_path = boss_proxy.InstallSystemWithTask()
    task_proxy = BOSS.get_proxy(task_path)
    sync_run_task(task_proxy)

    if self.external_reconfig:
        # prevent the reconfig flag from being written out
        # to kickstart if neither /etc/reconfigSys or /.unconfigured
        # are present
        services_proxy = SERVICES.get_proxy()
        services_proxy.SetSetupOnBoot(SETUP_ON_BOOT_DEFAULT)

    # Write the kickstart data to file
    log.info("writing the Initial Setup kickstart file %s",
             OUTPUT_KICKSTART_PATH)
    with open(OUTPUT_KICKSTART_PATH, "w") as f:
        f.write(str(self.data))
    log.info("finished writing the Initial Setup kickstart file")

    # Remove the reconfig files, if any - otherwise the reconfig mode
    # would start again next time the Initial Setup service is enabled.
    if self.external_reconfig:
        for reconfig_file in RECONFIG_FILES:
            if os.path.exists(reconfig_file):
                log.debug("removing reconfig trigger file: %s" % reconfig_file)
                os.remove(reconfig_file)

    # and we are done with applying changes
    log.info("all changes have been applied")
def __str__(self):
    """Return the kickstart text including the module-generated sections."""
    generated = BOSS.get_proxy().GenerateKickstart().strip()
    parts = [super().__str__(), "\n", generated, "\n\n",
             str(self.addons), str(self.anaconda)]
    return "".join(parts)
def __str__(self):
    """Return the kickstart text including the module-generated sections."""
    proxy = BOSS.get_proxy()
    generated = proxy.GenerateKickstart().strip()
    return "{}\n{}".format(super().__str__(), generated)
def stop_boss():
    """Stop boss by calling Quit() on DBus."""
    BOSS.get_proxy().Quit()
def set_modules_locale(locale):
    """Set locale of all modules.

    :param locale: the locale identifier to propagate
    """
    # The Boss forwards the locale to every started module service.
    BOSS.get_proxy().SetLocale(locale)
def _prepare_configuration(payload, ksdata):
    """Configure the installed system.

    :param payload: the active payload instance
    :param ksdata: a kickstart data handler
    :return: a task queue with the configuration tasks
    """
    configuration_queue = TaskQueue("Configuration queue")

    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        overwrite = isinstance(payload, LiveImagePayload)
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    # FIX: the users tasks have to go into the user_config queue - they were
    # appended to os_config, which was already scheduled above, leaving the
    # "User creation" queue empty.
    user_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    if isinstance(payload, LiveImagePayload):
        btrfs_task = bootloader_proxy.FixBTRFSWithTask(
            payload.kernel_version_list)
        generate_initramfs.append_dbus_tasks(STORAGE, [btrfs_task])

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    zipl_task = bootloader_proxy.FixZIPLWithTask()
    generate_initramfs.append_dbus_tasks(STORAGE, [zipl_task])
    configuration_queue.append(generate_initramfs)

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
def _prepare_installation(payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the active payload instance
    :param ksdata: a kickstart data handler
    :return: a task queue with the installation tasks
    """
    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.needs_storage_configuration:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    timezone_proxy = TIMEZONE.get_proxy()
    ntp_excluded = timezone.NTP_PACKAGE in ksdata.packages.excludedList
    pre_install.append_dbus_tasks(
        TIMEZONE,
        [timezone_proxy.ConfigureNTPServiceEnablementWithTask(ntp_excluded)])

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed
        & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")

        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks", strong=False)

        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [
            SECURITY, NETWORK, TIMEZONE, STORAGE
        ]
        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)

        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def configure_bootloader():
        # Run the DBus configuration task synchronously.
        boot_task = bootloader_proxy.ConfigureWithTask(
            payload.kernel_version_list)
        sync_run_task(STORAGE.get_proxy(boot_task))

    if not payload.handles_bootloader_configuration:
        # FIXME: This is a temporary workaround, run the DBus task directly.
        bootloader_install.append(
            Task("Configure the bootloader", configure_bootloader))

    bootloader_install.append_dbus_tasks(STORAGE,
                                         [bootloader_proxy.InstallWithTask()])
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
def _start_modules(self):
    """Start the kickstart modules."""
    # The Boss starts the module services via a DBus task.
    boss = BOSS.get_proxy()
    start_task_path = boss.StartModulesWithTask()
    sync_run_task(BOSS.get_proxy(start_task_path))
def parseKickstart(f, strict_mode=False, pass_to_boss=False):
    """Parse the given kickstart file.

    Terminates the installer (sys.exit) if the kickstart cannot be read.

    :param f: path to the kickstart file
    :param strict_mode: if True, any collected pykickstart warning is fatal
    :param pass_to_boss: if True, also split & distribute the kickstart to the
                         DBus modules via the Boss before parsing it locally
    :return: the populated Anaconda kickstart handler
    """
    # preprocessing the kickstart file has already been handled in initramfs.

    addon_paths = collect_addon_paths(ADDON_PATHS)
    handler = AnacondaKSHandler(addon_paths["ks"])
    ksparser = AnacondaKSParser(handler)

    # Warnings raised by pykickstart are collected here for later reporting.
    kswarnings = []
    ksmodule = "pykickstart"
    kscategories = (UserWarning, SyntaxWarning, DeprecationWarning)
    # keep a reference to the default printer so the hook can delegate to it
    showwarning = warnings.showwarning

    def ksshowwarning(message, category, filename, lineno, file=None, line=None):
        # Print the warning with default function.
        showwarning(message, category, filename, lineno, file, line)

        # Collect pykickstart warnings.
        if ksmodule in filename and issubclass(category, kscategories):
            kswarnings.append(message)

    try:
        # Process warnings differently in this part.
        with warnings.catch_warnings():

            # Set up the warnings module.
            warnings.showwarning = ksshowwarning

            for category in kscategories:
                warnings.filterwarnings(action="always", module=ksmodule, category=category)

            # Parse the kickstart file in DBus modules.
            if pass_to_boss:
                boss = BOSS.get_proxy()
                boss.SplitKickstart(f)
                errors = boss.DistributeKickstart()

                if errors:
                    message = "\n\n".join("{error_message}".format_map(e) for e in errors)
                    raise KickstartError(message)

            # Parse the kickstart file in anaconda.
            ksparser.readKickstart(f)

            # Process pykickstart warnings in the strict mode:
            if strict_mode and kswarnings:
                raise KickstartError(
                    "Please modify your kickstart file to fix the warnings "
                    "or remove the `ksstrict` option.")

    except (KickstartError, SplitKickstartError) as e:
        # We do not have an interface here yet, so we cannot use our error
        # handling callback.
        parsing_log.error(e)

        # Print kickstart warnings in the strict mode.
        if strict_mode and kswarnings:
            print(_("\nSome warnings occurred during reading the kickstart file:"))
            for w in kswarnings:
                print(str(w).strip())

        # Print an error and terminate.
        print(_("\nAn error occurred during reading the kickstart file:"
                "\n%s\n\nThe installer will now terminate.") % str(e).strip())

        util.ipmi_report(IPMI_ABORTED)
        time.sleep(10)
        sys.exit(1)

    return handler
def _prepare_installation(payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the payload instance
    :param ksdata: the kickstart data
    :return: the installation TaskQueue
    """
    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))

    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    # the DNF payload needs the storage configuration written out early
    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    # Set up FIPS for the payload installation.
    fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
    pre_install.append_dbus_tasks(SECURITY, [fips_task])

    # Install the payload.
    pre_install.append(
        Task("Find additional packages & run pre_install()", payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def run_install_bootloader():
        # run each DBus bootloader task synchronously, in order
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
def _prepare_configuration(payload, ksdata):
    """Configure the installed system.

    :param payload: the payload instance
    :param ksdata: the kickstart data
    :return: the configuration TaskQueue
    """
    configuration_queue = TaskQueue("Configuration queue")

    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION, subscription_dbus_tasks)
        configuration_queue.append(subscription_config)

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    if is_module_available(TIMEZONE):
        timezone_proxy = TIMEZONE.get_proxy()
        timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Localization DBus module
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()
        localization_dbus_tasks = localization_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    if conf.target.can_configure_network:
        firewall_proxy = NETWORK.get_proxy(FIREWALL)
        firewall_dbus_task = firewall_proxy.InstallWithTask()
        os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.target.can_configure_network and conf.system.provides_network_config:
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration, (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    if is_module_available(USERS):
        user_config = TaskQueue("User creation", N_("Creating users"))
        users_proxy = USERS.get_proxy()
        users_dbus_tasks = users_proxy.InstallWithTasks()
        user_config.append_dbus_tasks(USERS, users_dbus_tasks)
        configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        # run each DBus initramfs task synchronously, in order
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)

    # Configure FIPS.
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.ConfigureFIPSWithTask()])

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts, (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
def _prepare_installation(storage, payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param storage: the storage object (presumably a blivet-based
        InstallerStorage - it provides packages, bootloader and
        mount_filesystems; verify against the caller)
    :param payload: the payload instance
    :param ksdata: the kickstart data
    :return: the installation TaskQueue
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    # the bootloader is installed only if it is enabled and we are not
    # installing into a directory
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))

    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata",
             task=update_storage_ksdata,
             task_args=(storage, ksdata)))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        resize_format_pre=message_clbk,
        wait_for_entropy=entropy_wait_clbk)

    if not conf.target.is_directory:
        early_storage.append(
            Task("Activate filesystems",
                 task=turn_on_filesystems,
                 task_args=(storage, ),
                 task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(
        Task("Mount filesystems", task=storage.mount_filesystems))

    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(
            Task("Write early storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed
        & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.timezone.packages,
                                          reason="ntp",
                                          strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages,
                                              reason="bootloader")

        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")

        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)

        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [SECURITY, NETWORK]

        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s",
                      module, module_requirements)
            payload.requirements.add_requirements(module_requirements)

        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration and not conf.target.is_directory:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        late_storage.append(
            Task("Write late storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))
        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", write_boot_loader, (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests,
                                           SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(
            Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    return installation_queue
def _stop_boss_and_modules(self):
    """Stop the boss and the kickstart modules."""
    # Quit on the Boss proxy shuts down the Boss and its modules.
    BOSS.get_proxy().Quit()
def parseKickstart(f, strict_mode=False, pass_to_boss=False):
    """Parse the given kickstart file.

    Also brings iSCSI drives online so they can be referenced in the
    kickstart file. Terminates the installer (sys.exit) if the kickstart
    cannot be read.

    :param f: path to the kickstart file
    :param strict_mode: if True, any collected pykickstart warning is fatal
    :param pass_to_boss: if True, also split & distribute the kickstart to the
                         DBus modules via the Boss before parsing it locally
    :return: the populated Anaconda kickstart handler
    """
    # preprocessing the kickstart file has already been handled in initramfs.

    addon_paths = collect_addon_paths(ADDON_PATHS)
    handler = AnacondaKSHandler(addon_paths["ks"])
    ksparser = AnacondaKSParser(handler)

    # So that drives onlined by these can be used in the ks file
    blivet.iscsi.iscsi.startup()

    # Note we do NOT call dasd.startup() here, that does not online drives, but
    # only checks if they need formatting, which requires zerombr to be known

    # Warnings raised by pykickstart are collected here for later reporting.
    kswarnings = []
    ksmodule = "pykickstart"
    kscategories = (UserWarning, SyntaxWarning, DeprecationWarning)
    # keep a reference to the default printer so the hook can delegate to it
    showwarning = warnings.showwarning

    def ksshowwarning(message, category, filename, lineno, file=None, line=None):
        # Print the warning with default function.
        showwarning(message, category, filename, lineno, file, line)

        # Collect pykickstart warnings.
        if ksmodule in filename and issubclass(category, kscategories):
            kswarnings.append(message)

    try:
        # Process warnings differently in this part.
        with warnings.catch_warnings():

            # Set up the warnings module.
            warnings.showwarning = ksshowwarning

            for category in kscategories:
                warnings.filterwarnings(action="always", module=ksmodule, category=category)

            # Parse the kickstart file in DBus modules.
            if pass_to_boss:
                boss = BOSS.get_proxy()
                boss.SplitKickstart(f)
                errors = boss.DistributeKickstart()

                if errors:
                    message = "\n\n".join("{error_message}".format_map(e) for e in errors)
                    raise KickstartError(message)

            # Parse the kickstart file in anaconda.
            ksparser.readKickstart(f)

            # Process pykickstart warnings in the strict mode:
            if strict_mode and kswarnings:
                raise KickstartError("Please modify your kickstart file to fix the warnings "
                                     "or remove the `ksstrict` option.")

    except (KickstartError, SplitKickstartError) as e:
        # We do not have an interface here yet, so we cannot use our error
        # handling callback.
        parsing_log.error(e)

        # Print kickstart warnings in the strict mode.
        if strict_mode and kswarnings:
            print(_("\nSome warnings occurred during reading the kickstart file:"))
            for w in kswarnings:
                print(str(w).strip())

        # Print an error and terminate.
        print(_("\nAn error occurred during reading the kickstart file:"
                "\n%s\n\nThe installer will now terminate.") % str(e).strip())

        util.ipmi_report(IPMI_ABORTED)
        time.sleep(10)
        sys.exit(1)

    return handler