def setup(self, ksdata):
    """Set up NTP service handling based on the timezone module and kickstart.

    If NTP is disabled (or the NTP package is excluded in the kickstart),
    stop the NTP service in the installation environment and mark it
    disabled for the installed system.  Otherwise make sure the service is
    running, schedule the NTP package for installation and mark the service
    enabled - unless the user explicitly disabled it.

    :param ksdata: kickstart data (the packages section is consulted)
    """
    timezone_proxy = TIMEZONE.get_proxy()
    services_proxy = SERVICES.get_proxy()

    enabled_services = services_proxy.EnabledServices
    disabled_services = services_proxy.DisabledServices

    # do not install and use NTP package
    if not timezone_proxy.NTPEnabled or NTP_PACKAGE in ksdata.packages.excludedList:
        if util.service_running(NTP_SERVICE) and conf.system.can_set_time_synchronization:
            ret = util.stop_service(NTP_SERVICE)
            if ret != 0:
                timezone_log.error("Failed to stop NTP service")

        if NTP_SERVICE not in disabled_services:
            disabled_services.append(NTP_SERVICE)
            services_proxy.SetDisabledServices(disabled_services)
    # install and use NTP package
    else:
        if not util.service_running(NTP_SERVICE) and conf.system.can_set_time_synchronization:
            ret = util.start_service(NTP_SERVICE)
            if ret != 0:
                timezone_log.error("Failed to start NTP service")

        self.packages.append(NTP_PACKAGE)

        # idiomatic "not in" (PEP 8) instead of "not x in y";
        # respect an explicit user decision to keep the service disabled
        if NTP_SERVICE not in enabled_services and \
                NTP_SERVICE not in disabled_services:
            enabled_services.append(NTP_SERVICE)
            services_proxy.SetEnabledServices(enabled_services)
def apply(self):
    """Apply the selected locale and derive a default timezone if possible."""
    # Resolve and activate the locale chosen in the selection widget.
    (store, itr) = self._localeSelection.get_selected()
    chosen = store[itr][1]
    chosen = localization.setup_locale(chosen, self._l12_module.proxy,
                                       text_mode=False)
    self._set_lang(chosen)

    # Skip timezone and keyboard default setting for kickstart installs.
    # The user may have provided these values via kickstart and if not, we
    # need to prompt for them. But do continue if geolocation-with-kickstart
    # is enabled.
    if flags.flags.automatedInstall and not geoloc.geoloc.enabled:
        return

    timezone_proxy = TIMEZONE.get_proxy()
    loc_timezones = localization.get_locale_timezones(
        self._l12_module.proxy.Language)

    geo_timezone = geoloc.geoloc.result.timezone
    if geo_timezone:
        # (the geolocation module makes sure that the returned timezone is
        # either a valid timezone or None)
        log.info("using timezone determined by geolocation")
        timezone_proxy.SetTimezone(geo_timezone)
        # Either this is an interactive install and timezone.seen propagates
        # from the interactive default kickstart, or this is a kickstart
        # install where the user explicitly requested geolocation to be used.
        # So set timezone.seen to True, so that the user isn't forced to
        # enter the Date & Time spoke to acknowledge the timezone detected
        # by geolocation before continuing the installation.
        timezone_proxy.SetKickstarted(True)
    elif loc_timezones and not timezone_proxy.Timezone:
        # no data is provided by Geolocation, try to get timezone from the
        # current language
        log.info("geolocation not finished in time, using default timezone")
        timezone_proxy.SetTimezone(loc_timezones[0])
def __init__(self, data, storage, payload):
    """Initialize the time settings TUI spoke."""
    NormalTUISpoke.__init__(self, data, storage, payload)
    self.title = N_("Time settings")

    # widgets are created lazily when the spoke is shown
    self._timezone_spoke = None
    self._container = None

    # an OrderedDict keeps the NTP servers in insertion order;
    # the RLock serializes access to it across threads
    self._ntp_servers = OrderedDict()
    self._ntp_servers_lock = RLock()

    # observe the Timezone DBus module
    self._timezone_module = TIMEZONE.get_observer()
    self._timezone_module.connect()
def __init__(self, *args):
    """Initialize the date & time GUI spoke."""
    NormalSpoke.__init__(self, *args)

    # taking values from the kickstart file?
    self._kickstarted = flags.flags.automatedInstall

    # timer handles and UI state; None/False until the spoke is shown
    self._update_datetime_timer = None
    self._start_updating_timer = None
    self._shown = False
    self._tz = None

    # observe the DBus modules this spoke talks to
    self._timezone_module = TIMEZONE.get_observer()
    self._timezone_module.connect()
    self._network_module = NETWORK.get_observer()
    self._network_module.connect()
def __init__(self, data, storage, payload):
    """Initialize the timezone selection TUI spoke."""
    super().__init__(data, storage, payload)
    self.title = N_("Timezone settings")
    self._container = None

    # Fetch the region -> timezones mapping once (the original code called
    # get_all_regions_and_timezones() twice).  The unsorted key order must
    # be preserved so regions display in the same order as the GUI.
    regions_and_timezones = timezone.get_all_regions_and_timezones()
    self._regions = list(regions_and_timezones.keys())
    self._timezones = {region: sorted(zones)
                       for region, zones in regions_and_timezones.items()}
    self._lower_regions = [region.lower() for region in self._regions]

    self._zones = ["%s/%s" % (region, zone)
                   for region in self._timezones
                   for zone in self._timezones[region]]
    # for lowercase lookup
    self._lower_zones = [zone.lower().replace("_", " ")
                         for region in self._timezones
                         for zone in self._timezones[region]]
    self._selection = ""

    self._timezone_module = TIMEZONE.get_observer()
    self._timezone_module.connect()
def _set_ntp_servers_from_dhcp():
    """Set NTP servers of timezone module from dhcp if not set by kickstart."""
    timezone_proxy = TIMEZONE.get_proxy()
    dhcp_servers = get_ntp_servers_from_dhcp(get_nm_client())
    log.info("got %d NTP servers from DHCP", len(dhcp_servers))

    hostnames = []
    for address in dhcp_servers:
        try:
            resolved = socket.gethostbyaddr(address)[0]
        except socket.error:
            # getting hostname failed, just use the address returned from DHCP
            log.debug("getting NTP server host name failed for address: %s",
                      address)
            resolved = address
        hostnames.append(resolved)

    # check if some NTP servers were specified from kickstart;
    # only fall back to the DHCP-provided ones when none were
    if not timezone_proxy.NTPServers and conf.target.is_hardware:
        # no NTP servers were specified, add those from DHCP
        timezone_proxy.SetNTPServers(hostnames)
def save_hw_clock(timezone_proxy=None):
    """ Save system time to HW clock.

    :param timezone_proxy: DBus proxy of the timezone module
    """
    # s390 machines have no hardware clock we can write to
    if arch.is_s390():
        return

    proxy = timezone_proxy or TIMEZONE.get_proxy()

    # pick the clock mode flag according to the configured setting
    mode_flag = "--utc" if proxy.IsUTC else "--local"
    util.execWithRedirect("hwclock", ["--systohc", mode_flag])
def execute(self):
    """Write timezone and NTP (chrony) configuration to the sysroot."""
    # get the DBus proxies
    proxy = TIMEZONE.get_proxy()

    # write out timezone configuration
    requested_timezone = proxy.Timezone

    if not timezone.is_valid_timezone(requested_timezone):
        # this should never happen, but for pity's sake
        timezone_log.warning("Timezone %s set in kickstart is not valid, falling "
                             "back to default (America/New_York).",
                             requested_timezone)
        proxy.SetTimezone("America/New_York")

    timezone.write_timezone_config(proxy, util.getSysroot())

    # write out NTP configuration (if set) and --nontp is not used
    ntp_servers = proxy.NTPServers

    if not (proxy.NTPEnabled and ntp_servers):
        return

    chronyd_conf_path = os.path.normpath(util.getSysroot() + ntp.NTP_CONFIG_FILE)
    pools, servers = ntp.internal_to_pools_and_servers(ntp_servers)

    if os.path.exists(chronyd_conf_path):
        timezone_log.debug("Modifying installed chrony configuration")
        try:
            ntp.save_servers_to_config(pools, servers,
                                       conf_file_path=chronyd_conf_path)
        except ntp.NTPconfigError as ntperr:
            timezone_log.warning("Failed to save NTP configuration: %s", ntperr)
        return

    # use chrony conf file from installation environment when
    # chrony is not installed (chrony conf file is missing)
    timezone_log.debug("Creating chrony configuration based on the "
                       "configuration from installation environment")
    try:
        ntp.save_servers_to_config(pools, servers,
                                   conf_file_path=ntp.NTP_CONFIG_FILE,
                                   out_file_path=chronyd_conf_path)
    except ntp.NTPconfigError as ntperr:
        timezone_log.warning("Failed to save NTP configuration without chrony package: %s",
                             ntperr)
def __str__(self):
    """Return the kickstart representation generated by the Timezone module."""
    return TIMEZONE.get_proxy().GenerateKickstart()
def _prepare_configuration(payload, ksdata):
    """Configure the installed system.

    Builds (but does not run) the queue of post-installation configuration
    tasks: per-module DBus install tasks, network configuration, user
    creation, addons, initramfs generation, FIPS/realm setup, kexec setup,
    kickstart output and %post scripts.  The order of queue construction
    below is the execution order, so keep it intact.

    :param payload: the payload instance being installed
    :param ksdata: kickstart data
    :return: the configuration TaskQueue
    """
    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))
    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION, subscription_dbus_tasks)
        configuration_queue.append(subscription_config)
    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    # add installation tasks for the Security DBus module
    if is_module_available(SECURITY):
        security_proxy = SECURITY.get_proxy()
        security_dbus_tasks = security_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)
    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    if is_module_available(TIMEZONE):
        timezone_proxy = TIMEZONE.get_proxy()
        timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)
    # add installation tasks for the Services DBus module
    if is_module_available(SERVICES):
        services_proxy = SERVICES.get_proxy()
        services_dbus_tasks = services_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)
    # add installation tasks for the Localization DBus module
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()
        localization_dbus_tasks = localization_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)
    # add the Firewall configuration task
    if conf.target.can_configure_network:
        firewall_proxy = NETWORK.get_proxy(FIREWALL)
        firewall_dbus_task = firewall_proxy.InstallWithTask()
        os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])
    configuration_queue.append(os_config)
    # schedule network configuration (if required)
    if conf.target.can_configure_network and conf.system.provides_network_config:
        # live payloads overwrite any existing network configuration
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)
    # add installation tasks for the Users DBus module
    if is_module_available(USERS):
        user_config = TaskQueue("User creation", N_("Creating users"))
        users_proxy = USERS.get_proxy()
        users_dbus_tasks = users_proxy.InstallWithTasks()
        user_config.append_dbus_tasks(USERS, users_dbus_tasks)
        configuration_queue.append(user_config)
    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    boss_proxy = BOSS.get_proxy()
    # collect install-system tasks published by addon DBus services via the Boss
    for service_name, object_path in boss_proxy.CollectInstallSystemTasks():
        task_proxy = DBus.get_proxy(service_name, object_path)
        addon_config.append(DBusTask(task_proxy))
    configuration_queue.append(addon_config)
    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        # run the DBus-provided initramfs tasks synchronously, in order
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)
        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)
    if is_module_available(SECURITY):
        security_proxy = SECURITY.get_proxy()
        # Configure FIPS.
        configuration_queue.append_dbus_tasks(
            SECURITY, [security_proxy.ConfigureFIPSWithTask()])
        # Join a realm. This can run only after network is configured in the target system chroot.
        configuration_queue.append_dbus_tasks(
            SECURITY, [security_proxy.JoinRealmWithTask()])
    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)
    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))
    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))
    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)
    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)
    return configuration_queue
# Specify protected devices. from pyanaconda.modules.common.constants.services import STORAGE protected_devices = anaconda.get_protected_devices(opts) disk_select_proxy.SetProtectedDevices(protected_devices) from pyanaconda.payload.manager import payloadMgr from pyanaconda.timezone import time_initialize if not conf.target.is_directory: threadMgr.add(AnacondaThread(name=constants.THREAD_STORAGE, target=reset_storage, args=(anaconda.storage, ))) from pyanaconda.modules.common.constants.services import TIMEZONE timezone_proxy = TIMEZONE.get_proxy() if conf.system.can_initialize_system_clock: threadMgr.add(AnacondaThread(name=constants.THREAD_TIME_INIT, target=time_initialize, args=(timezone_proxy, anaconda.storage))) if flags.rescue_mode: rescue.start_rescue_mode_ui(anaconda) else: startup_utils.clean_pstore() # add our own additional signal handlers signal.signal(signal.SIGUSR1, lambda signum, frame: exception.test_exception_handling())
ignored_disks = disk_select_proxy.IgnoredDisks if oemdrv_disk not in ignored_disks: log.info("Adding disk %s labeled OEMDRV to ignored disks", oemdrv_disk) ignored_disks.append(oemdrv_disk) disk_select_proxy.SetIgnoredDisks(ignored_disks) from pyanaconda.payload import payloadMgr from pyanaconda.timezone import time_initialize if not conf.target.is_directory: threadMgr.add(AnacondaThread(name=constants.THREAD_STORAGE, target=storage_initialize, args=(anaconda.storage, ksdata, anaconda.protected))) from pyanaconda.modules.common.constants.services import TIMEZONE timezone_proxy = TIMEZONE.get_proxy() if conf.system.can_initialize_system_clock: threadMgr.add(AnacondaThread(name=constants.THREAD_TIME_INIT, target=time_initialize, args=(timezone_proxy, anaconda.storage, anaconda.bootloader))) if flags.rescue_mode: rescue.start_rescue_mode_ui(anaconda) else: startup_utils.clean_pstore() # only install interactive exception handler in interactive modes if ksdata.displaymode.displayMode != pykickstart_constants.DISPLAY_MODE_CMDLINE or flags.debug:
def __str__(self):
    """Return the Timezone module's kickstart section as a string."""
    proxy = TIMEZONE.get_proxy()
    return proxy.GenerateKickstart()
def _apply(self):
    """Apply the Initial Setup configuration to the system.

    Runs the DBus install tasks for timezone, localization, hostname,
    groups/users/root password (unless already configured and not in
    reconfig mode), runs addon tasks, clears the reconfig flag and writes
    the output kickstart.  The task order below matters.
    """
    # Do not execute sections that were part of the original
    # anaconda kickstart file (== have .seen flag set)
    log.info("applying changes")

    services_proxy = SERVICES.get_proxy()
    reconfig_mode = services_proxy.SetupOnBoot == SETUP_ON_BOOT_RECONFIG

    # data.selinux
    # data.firewall

    # Configure the timezone.
    timezone_proxy = TIMEZONE.get_proxy()
    for task_path in timezone_proxy.InstallWithTasks():
        task_proxy = TIMEZONE.get_proxy(task_path)
        sync_run_task(task_proxy)

    # Configure the localization.
    localization_proxy = LOCALIZATION.get_proxy()
    for task_path in localization_proxy.InstallWithTasks():
        task_proxy = LOCALIZATION.get_proxy(task_path)
        sync_run_task(task_proxy)

    # Configure persistent hostname
    network_proxy = NETWORK.get_proxy()
    network_task = network_proxy.ConfigureHostnameWithTask(True)
    task_proxy = NETWORK.get_proxy(network_task)
    sync_run_task(task_proxy)
    # Set current hostname
    network_proxy.SetCurrentHostname(network_proxy.Hostname)

    # Configure groups, users & root account
    #
    # NOTE: We only configure groups, users & root account if the respective
    #       kickstart commands are *not* seen in the input kickstart.
    #       This basically means that we will configure only what was
    #       set in the Initial Setup UI and will not attempt to configure
    #       anything that looks like it was configured previously in
    #       the Anaconda UI or installation kickstart.
    users_proxy = USERS.get_proxy()

    if self._groups_already_configured and not reconfig_mode:
        log.debug("skipping user group configuration - already configured")
    elif users_proxy.Groups:
        # only run if there are some groups to create
        groups_task = users_proxy.ConfigureGroupsWithTask()
        task_proxy = USERS.get_proxy(groups_task)
        log.debug("configuring user groups via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    if self._users_already_configured and not reconfig_mode:
        log.debug("skipping user configuration - already configured")
    elif users_proxy.Users:
        # only run if there are some users to create
        users_task = users_proxy.ConfigureUsersWithTask()
        task_proxy = USERS.get_proxy(users_task)
        log.debug("configuring users via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    if self._root_password_already_configured and not reconfig_mode:
        log.debug(
            "skipping root password configuration - already configured")
    else:
        root_task = users_proxy.SetRootPasswordWithTask()
        task_proxy = USERS.get_proxy(root_task)
        log.debug("configuring root password via %s task", task_proxy.Name)
        sync_run_task(task_proxy)

    # Configure all addons
    log.info("executing addons")
    boss_proxy = BOSS.get_proxy()
    for service_name, object_path in boss_proxy.CollectInstallSystemTasks():
        task_proxy = DBus.get_proxy(service_name, object_path)
        sync_run_task(task_proxy)

    if self.external_reconfig:
        # prevent the reconfig flag from being written out
        # to kickstart if neither /etc/reconfigSys or /.unconfigured
        # are present
        services_proxy = SERVICES.get_proxy()
        services_proxy.SetSetupOnBoot(SETUP_ON_BOOT_DEFAULT)

    # Write the kickstart data to file
    log.info("writing the Initial Setup kickstart file %s",
             OUTPUT_KICKSTART_PATH)
    with open(OUTPUT_KICKSTART_PATH, "w") as f:
        f.write(str(self.data))
    log.info("finished writing the Initial Setup kickstart file")

    # Remove the reconfig files, if any - otherwise the reconfig mode
    # would start again next time the Initial Setup service is enabled.
    if self.external_reconfig:
        for reconfig_file in RECONFIG_FILES:
            if os.path.exists(reconfig_file):
                # use lazy %-args instead of eager string formatting
                log.debug("removing reconfig trigger file: %s", reconfig_file)
                os.remove(reconfig_file)

    # and we are done with applying changes
    log.info("all changes have been applied")
def _prepare_configuration(storage, payload, ksdata):
    """Configure the installed system.

    Builds (but does not run) the post-installation configuration queue.
    The order of queue construction below is the execution order.

    :param storage: the storage object
    :param payload: the payload instance being installed
    :param ksdata: kickstart data
    :return: the configuration TaskQueue
    """
    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))
    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))
    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)
    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)
    # add installation tasks for the Timezone DBus module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)
    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)
    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])
    configuration_queue.append(os_config)
    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        overwrite = isinstance(payload, LiveImagePayload)
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)
    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    # BUG FIX: the user tasks must go into user_config (the queue scheduled
    # below); they were previously appended to os_config, which had already
    # been added to the main queue, leaving user_config empty.
    user_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)
    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, None, payload)))
    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])
    configuration_queue.append(addon_config)
    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))
    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))
    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))
    configuration_queue.append(generate_initramfs)
    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.JoinRealmWithTask()])
    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)
    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)
    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))
    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))
    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)
    return configuration_queue
def _prepare_installation(payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the payload instance being installed
    :param ksdata: kickstart data
    :return: the installation TaskQueue
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))
    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            # fixed typo: was "wait_for_all_treads"
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_threads))
        installation_queue.append(wait_for_threads)
    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no queue needed
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)
    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))
    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])
    installation_queue.append(setup_environment)
    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())
    if payload.needs_storage_configuration:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])
    installation_queue.append(early_storage)
    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)
    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    timezone_proxy = TIMEZONE.get_proxy()
    ntp_excluded = timezone.NTP_PACKAGE in ksdata.packages.excludedList
    pre_install.append_dbus_tasks(
        TIMEZONE,
        [timezone_proxy.ConfigureNTPServiceEnablementWithTask(ntp_excluded)])
    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))
    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed
        & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")
        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks", strong=False)
        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [
            SECURITY, NETWORK, TIMEZONE, STORAGE, SUBSCRIPTION
        ]
        for module in modules_with_package_requirements:
            # Skip unavailable modules.
            if not is_module_available(module):
                continue
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)
        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)
    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)
    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)
    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def configure_bootloader():
        boot_task = bootloader_proxy.ConfigureWithTask(
            payload.kernel_version_list)
        sync_run_task(STORAGE.get_proxy(boot_task))

    if not payload.handles_bootloader_configuration:
        # FIXME: This is a temporary workaround, run the DBus task directly.
        bootloader_install.append(
            Task("Configure the bootloader", configure_bootloader))
    bootloader_install.append_dbus_tasks(STORAGE,
                                         [bootloader_proxy.InstallWithTask()])
    installation_queue.append(bootloader_install)
    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)
    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)
    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)
    return installation_queue