def __init__(self, *args, **kwargs):
    # This spoke uses multiple inheritance and neither base calls
    # super() cooperatively, so both bases are initialized explicitly.
    StandaloneSpoke.__init__(self, *args, **kwargs)
    LangLocaleHandler.__init__(self)
    # Original widget strings, remembered so they can be restored later.
    self._origStrings = {}
    # Proxy of the Localization DBus module (always available here).
    self._l12_module = LOCALIZATION.get_proxy()
    # The Timezone DBus module is optional; keep None when it is not running.
    self._tz_module = None
    if is_module_available(TIMEZONE):
        self._tz_module = TIMEZONE.get_proxy()
    self._only_existing_locales = True
def initialize_first_boot_action():
    """Initialize the setup on boot action."""
    if not is_module_available(SERVICES):
        return

    proxy = SERVICES.get_proxy()

    # Enable by default for interactive installations, but only when the
    # kickstart/user left the action at its default value.
    if proxy.SetupOnBoot == SETUP_ON_BOOT_DEFAULT and not flags.automatedInstall:
        proxy.SetSetupOnBoot(SETUP_ON_BOOT_ENABLED)
def is_cdn_registration_required(payload):
    """Check if Red Hat CDN registration is required.

    :param payload: the payload object
    """
    # Registration matters only when the CDN is the installation source
    # and the Subscription module is actually running.
    if not check_cdn_is_installation_source(payload):
        return False
    if not is_module_available(SUBSCRIPTION):
        return False
    return not SUBSCRIPTION.get_proxy().IsSubscriptionAttached
def revert_changes(self, ksdata, storage):
    """:see: RuleHandler.revert_changes

    Restore the kdump enablement state that was recorded before this rule
    overrode it, then forget both recorded values.

    :param ksdata: kickstart data (part of the RuleHandler API)
    :param storage: storage object (part of the RuleHandler API)
    """
    if is_module_available(KDUMP):
        kdump_proxy = KDUMP.get_proxy()
        # Guard on the value we actually restore: the original code checked
        # self._kdump_enabled, which could write a None default back to the
        # module's KdumpEnabled property.
        if self._kdump_default_enabled is not None:
            kdump_proxy.KdumpEnabled = self._kdump_default_enabled
    else:
        log.warning("OSCAP Addon: com_redhat_kdump is not installed. "
                    "Skipping reverting kdump configuration")
    # Clear the recorded state so a later evaluation starts fresh.
    self._kdump_enabled = None
    self._kdump_default_enabled = None
def initialize_security():
    """Initialize the security configuration."""
    if not is_module_available(SECURITY):
        return

    proxy = SECURITY.get_proxy()

    # A command-line/boot option overrides the SELinux state from kickstart.
    if conf.security.selinux != constants.SELINUX_DEFAULT:
        proxy.SetSELinux(conf.security.selinux)

    # Enable fingerprint option by default (#481273).
    if not flags.automatedInstall:
        proxy.SetFingerprintAuthEnabled(True)
def should_run(cls, environment, data):
    """Should the spoke run?"""
    if not is_module_available(USERS):
        return False

    if FirstbootSpokeMixIn.should_run(environment, data):
        return True

    # The user spoke should always run in anaconda; in firstboot it runs
    # only when doing reconfig or when the installation created no user.
    existing_users = get_user_list(USERS.get_proxy())
    return bool(environment == FIRSTBOOT_ENVIRON and data and not existing_users)
def initialize_system_clock():
    """Initialize the system clock."""
    if not conf.system.can_initialize_system_clock:
        log.debug("Skip the clock initialization.")
        return

    if not is_module_available(TIMEZONE):
        return

    from pyanaconda.timezone import time_initialize

    # Run the clock initialization in a background thread.
    proxy = TIMEZONE.get_proxy()
    thread = AnacondaThread(
        name=constants.THREAD_TIME_INIT,
        target=time_initialize,
        args=(proxy, ),
    )
    threadMgr.add(thread)
def status(self):
    """Return a one-line status string describing the software selection."""
    if self._error_msgs:
        return _("Error checking software selection")

    # Installing from the Red Hat CDN requires an attached subscription.
    cdn_source = check_cdn_is_installation_source(self.payload)
    attached = False
    if is_module_available(SUBSCRIPTION):
        attached = SUBSCRIPTION.get_proxy().IsSubscriptionAttached
    if cdn_source and not attached:
        return _("Red Hat CDN requires registration.")

    if not self.ready:
        return _("Installation source not set up")
    if not self.txid_valid:
        return _("Source changed - please verify")

    environment = self._selection.environment

    # Kickstart installation.
    if flags.automatedInstall:
        if self._kickstarted:
            # A %packages section is present in the kickstart but no
            # environment was set.
            if not environment:
                return _("Custom software selected")
            # The environment is set to an invalid value.
            if not self.is_environment_valid(environment):
                return _("Invalid environment specified in kickstart")
        elif not environment:
            # No %packages section in the kickstart and no environment set.
            return _("Please confirm software selection")

    # Interactive installation.
    if not flags.automatedInstall:
        if not environment:
            # No environment set yet.
            return _("Please confirm software selection")
        if not self.is_environment_valid(environment):
            # A valid selection (default, manual or kickstart) can become
            # invalid when the installation source is switched to one where
            # the selected environment no longer exists.
            return _("Selected environment is not valid")

    return self.payload.environment_description(environment)[0]
def reinitialize_locale(opts, text_mode):
    """Reinitialize locale.

    We need to reinitialize the locale if GUI startup failed. The text mode
    might not be able to display the characters from our current locale.

    :param opts: the command line/boot options
    :param text_mode: is the locale being set up for the text mode?
    """
    from pyanaconda import localization

    # Pass the Localization proxy only when the module is running.
    proxy = LOCALIZATION.get_proxy() if is_module_available(LOCALIZATION) else None

    log.warning("reinitializing locale due to failed attempt to start the GUI")
    localization.setup_locale(os.environ["LANG"], proxy, text_mode=text_mode)
def is_module_not_available_test(self, get_proxy):
    """Test the is_module_available() function - module not available."""
    # Mock the Boss proxy and make it report a module list that does
    # not contain the Subscription module.
    boss_proxy = get_proxy.return_value
    boss_proxy.GetModules.return_value = [
        "org.fedoraproject.Anaconda.Modules." + name
        for name in (
            "Timezone",
            "Network",
            "Localization",
            "Security",
            "Users",
            "Payloads",
            "Storage",
            "Services",
        )
    ]
    # call the function
    self.assertFalse(is_module_available(SUBSCRIPTION))
def should_run(cls, environment, data):
    """Should the spoke run?"""
    if not is_module_available(USERS):
        return False

    # The user spoke should always run in anaconda; in firstboot it runs
    # only when doing reconfig or when the installation created no user.
    existing_users = get_user_list(USERS.get_proxy())

    if environment == constants.ANACONDA_ENVIRON:
        return True

    if environment == constants.FIRSTBOOT_ENVIRON:
        if data is None:
            # Cannot decide yet - stay in the game and let another call
            # with data available (it will come) decide.
            return True
        if data and not existing_users:
            return True

    return False
def initialize_default_systemd_target(text_mode):
    """Initialize the default systemd target.

    If we're in text mode, the resulting system should be too unless
    the kickstart specified otherwise.

    NOTE: Installation controlled via VNC is considered to be a text mode
    installation, as the installation run itself is effectively headless.

    :param text_mode: does the installer run in the text mode?
    """
    if not is_module_available(SERVICES):
        return

    proxy = SERVICES.get_proxy()

    # Only override when nothing was configured and we are headless.
    if not proxy.DefaultTarget and (text_mode or flags.usevnc):
        log.debug("no default systemd target set & in text/vnc mode - setting multi-user.target.")
        proxy.SetDefaultTarget(TEXT_ONLY_TARGET)
def start_chronyd():
    """Start the NTP daemon chronyd.

    Set up NTP servers and start NTP daemon if not requested otherwise.
    """
    if not conf.system.can_set_time_synchronization:
        log.debug("Skip the time synchronization.")
        return

    if not is_module_available(TIMEZONE):
        log.debug("Skip the time synchronization due to disabled module.")
        return

    proxy = TIMEZONE.get_proxy()
    ntp_enabled = proxy.NTPEnabled
    sources = TimeSourceData.from_structure_list(proxy.TimeSources)

    # Write the configured servers to the chrony configuration first.
    if sources:
        ntp.save_servers_to_config(sources)

    if ntp_enabled:
        start_service("chronyd")
def collect_language_requirements(dnf_base):
    """Collect requirements for supported languages.

    :param dnf_base: a DNF base
    :return: a list of requirements
    """
    if not is_module_available(LOCALIZATION):
        return []

    proxy = LOCALIZATION.get_proxy()
    locales = [proxy.Language] + proxy.LanguageSupport

    # Find all available langpacks and extract their valid langcodes.
    packages = dnf_base.sack.query().available().filter(name__glob="langpacks-*")
    codes = [
        code
        for code in (pkg.name.split('-', 1)[1] for pkg in packages)
        if is_valid_langcode(code)
    ]

    # Find the best langpack to install for every requested locale.
    requirements = []
    for locale in locales:
        best_match = find_best_locale_match(locale, codes)

        if not best_match:
            log.warning("Selected locale '%s' does not match "
                        "any available langpacks.", locale)
            continue

        requirements.append(Requirement.for_package(
            package_name="langpacks-" + best_match,
            reason="Required to support the locale '{}'.".format(locale)
        ))

    return requirements
def save_hw_clock(timezone_proxy=None):
    """ Save system time to HW clock.

    :param timezone_proxy: DBus proxy of the timezone module
    """
    # s390 machines have no HW clock we could set this way.
    if arch.is_s390():
        return

    if not is_module_available(TIMEZONE):
        return

    if not timezone_proxy:
        timezone_proxy = TIMEZONE.get_proxy()

    # Tell hwclock whether the HW clock keeps UTC or local time.
    mode = "--utc" if timezone_proxy.IsUTC else "--local"
    util.execWithRedirect("hwclock", ["--systohc", mode])
def _wait_for_network(self, timeout=NETWORK_CONNECTION_TIMEOUT):
    """Wait until network is available, or time runs out

    :param float timeout: how long shall we try waiting
    :return bool: is there network connectivity
    """
    if not is_module_available(NETWORK):
        return False

    proxy = NETWORK.get_proxy()
    if proxy.Connected:
        return True

    log.info("Geoloc: Waiting for network to become available")

    # Poll the connectivity state until the deadline passes.
    poll_interval = 0.1
    deadline = time.perf_counter() + timeout
    while time.perf_counter() < deadline:
        time.sleep(poll_interval)
        if proxy.Connected:
            return True

    return proxy.Connected
def activate_keyboard(opts):
    """Activate keyboard.

    Set up keyboard layout from the command line option and let it override
    from kickstart if/when X is initialized.

    :param opts: the command line/boot options
    """
    if not is_module_available(LOCALIZATION):
        return

    from pyanaconda import keyboard

    proxy = LOCALIZATION.get_proxy()

    # A boot option takes precedence when kickstart did not set the keyboard.
    if opts.keymap and not proxy.KeyboardKickstarted:
        proxy.SetKeyboard(opts.keymap)
        proxy.SetKeyboardKickstarted(True)

    if proxy.KeyboardKickstarted:
        if conf.system.can_activate_keyboard:
            keyboard.activate_keyboard(proxy)
        else:
            # At least make sure we have all the values.
            keyboard.populate_missing_items(proxy)
def should_run(cls, environment, data):
    """Should the spoke run?"""
    # Run only when the Users module is available and the mixin agrees.
    return (is_module_available(USERS)
            and FirstbootSpokeMixIn.should_run(environment, data))
def _prepare_installation(payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the payload object
    :param ksdata: kickstart data
    :return: the assembled installation TaskQueue
    """
    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        # Fixed typo in the original local name ("treads" -> "threads").
        def _wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))
        wait_for_threads.append(
            Task("Wait for all threads to finish", _wait_for_all_threads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))

    boss_proxy = BOSS.get_proxy()
    for service_name, object_path in boss_proxy.CollectConfigureRuntimeTasks():
        task_proxy = DBus.get_proxy(service_name, object_path)
        setup_environment.append(DBusTask(task_proxy))

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    if is_module_available(SECURITY):
        security_proxy = SECURITY.get_proxy()

        # Discover a realm.
        pre_install.append_dbus_tasks(SECURITY,
                                      [security_proxy.DiscoverRealmWithTask()])

        # Set up FIPS for the payload installation.
        fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
        pre_install.append_dbus_tasks(SECURITY, [fips_task])

    # Install the payload.
    pre_install.append(
        Task("Find additional packages & run pre_install()",
             payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def run_install_bootloader():
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)
        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
def _prepare_configuration(payload, ksdata):
    """Configure the installed system.

    Builds and returns the TaskQueue that configures the target system
    after the payload has been installed. The ordering of the sub-queues
    below is deliberate (e.g. Timezone tasks must run before Services
    tasks) - do not reorder without checking the inline comments.

    :param payload: the payload object
    :param ksdata: kickstart data
    :return: the assembled configuration TaskQueue
    """
    configuration_queue = TaskQueue("Configuration queue")

    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION,
                                              subscription_dbus_tasks)
        configuration_queue.append(subscription_config)

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    # NOTE: unlike the optional modules below, SECURITY is used without an
    # availability check here; its proxy is also reused later for the FIPS
    # and realm-join tasks.
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    if is_module_available(TIMEZONE):
        timezone_proxy = TIMEZONE.get_proxy()
        timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Localization DBus module
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()
        localization_dbus_tasks = localization_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    if conf.target.can_configure_network:
        firewall_proxy = NETWORK.get_proxy(FIREWALL)
        firewall_dbus_task = firewall_proxy.InstallWithTask()
        os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.target.can_configure_network and conf.system.provides_network_config:
        # live payloads overwrite any existing configuration on the target
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    if is_module_available(USERS):
        user_config = TaskQueue("User creation", N_("Creating users"))
        users_proxy = USERS.get_proxy()
        users_dbus_tasks = users_proxy.InstallWithTasks()
        user_config.append_dbus_tasks(USERS, users_dbus_tasks)
        configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        # the bootloader module returns one task per installed kernel
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)
        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)

    # Configure FIPS.
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.ConfigureFIPSWithTask()])

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
def _prepare_installation(self, payload, ksdata):
    """Perform an installation.

    This method takes the ksdata as prepared by the UI (the first hub, in
    graphical mode) and applies it to the disk. The two main tasks for this
    are putting filesystems onto disks and installing packages onto those
    filesystems.

    :param payload: the payload object
    :param ksdata: kickstart data
    :return: the assembled installation TaskQueue
    """
    installation_queue = TaskQueue("Installation queue")

    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(
        lambda x: progress_step(x.name))

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        _("Setting up the installation environment"))

    boss_proxy = BOSS.get_proxy()
    for service_name, object_path in boss_proxy.CollectConfigureRuntimeTasks(
    ):
        task_proxy = DBus.get_proxy(service_name, object_path)
        setup_environment.append(DBusTask(task_proxy))

    # Add configuration tasks for the Localization DBus module.
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()

        # Populate the missing keyboard values before the payload installation,
        # so the module requirements can be generated for the right configuration.
        # FIXME: Make sure that the module always returns right values.
        populate_task = localization_proxy.PopulateMissingKeyboardConfigurationWithTask(
        )
        setup_environment.append_dbus_tasks(LOCALIZATION, [populate_task])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              _("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    _("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            _("Running pre-installation tasks"))

    if is_module_available(SECURITY):
        security_proxy = SECURITY.get_proxy()

        # Discover a realm.
        pre_install.append_dbus_tasks(
            SECURITY, [security_proxy.DiscoverRealmWithTask()])

        # Set up FIPS for the payload installation.
        fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
        pre_install.append_dbus_tasks(SECURITY, [fips_task])

    # Install the payload.
    pre_install.append(
        Task("Find additional packages & run pre_install()",
             payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", _("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 _("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   _("Installing boot loader"))

    def run_configure_bootloader():
        # the Boss collects (service, task) pairs from all modules
        tasks = boss_proxy.CollectConfigureBootloaderTasks(
            payload.kernel_version_list)
        for service, task in tasks:
            sync_run_task(DBus.get_proxy(service, task))

    bootloader_install.append(
        Task("Configure bootloader", run_configure_bootloader))

    def run_install_bootloader():
        # the bootloader module returns one task per installed kernel
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)
        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             _("Performing post-installation setup tasks"))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue(
            "Creating post installation snapshots", _("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
exception.test_exception_handling()) signal.signal(signal.SIGUSR2, lambda signum, frame: anaconda.dumpState()) atexit.register(exitHandler, ksdata.reboot) from pyanaconda import exception anaconda.mehConfig = exception.initExceptionHandling(anaconda) # Start the subscription handling thread if the Subscription DBus module # provides enough authentication data. # - as kickstart only supports org + key authentication & nothing # else currently talks to the Subscription DBus module, # we only check if organization id & at least one activation # key are available from pyanaconda.modules.common.util import is_module_available from pyanaconda.modules.common.constants.services import SUBSCRIPTION if is_module_available(SUBSCRIPTION): from pyanaconda.ui.lib.subscription import org_keys_sufficient, register_and_subscribe if org_keys_sufficient(): threadMgr.add( AnacondaThread( name=constants.THREAD_SUBSCRIPTION, target=register_and_subscribe, args=[anaconda.payload] ) ) # add additional repositories from the cmdline to kickstart data anaconda.add_additional_repositories_to_ksdata() # Fallback to default for interactive or for a kickstart with no installation method. fallback = not flags.automatedInstall \
def should_run(cls, environment, data):
    """Should the spoke run?"""
    # Run only when the Localization module is available and the base
    # spoke class agrees.
    return (is_module_available(LOCALIZATION)
            and StandaloneSpoke.should_run(environment, data))
def should_run(cls, environment, data):
    """Run the spoke only when the kdump DBus module is available."""
    return bool(is_module_available(KDUMP))
def should_run(cls, environment, data):
    """The Subscription spoke should run only if the Subscription
    module is available."""
    return bool(is_module_available(SUBSCRIPTION))
def update_base_repo(self, fallback=True, checkmount=True):
    """Update the base repository from the DBus source.

    :param fallback: whether to fall back to the default source when the
                     configured base repo turns out to be invalid
    :param checkmount: whether to check for a valid optical install media
                       and switch the default source to CDROM if found
    """
    log.info("Configuring the base repo")
    self._reset_configuration()

    disabled_treeinfo_repo_names = self._cleanup_old_treeinfo_repositories()

    # Find the source and its type.
    source_proxy = self.get_source_proxy()
    source_type = source_proxy.Type

    # Change the default source to CDROM if there is a valid install media.
    # FIXME: Set up the default source earlier.
    if checkmount and self._is_source_default() and find_optical_install_media():
        source_type = SOURCE_TYPE_CDROM
        source_proxy = create_source(source_type)
        set_source(self.proxy, source_proxy)

    # Set up the source.
    set_up_sources(self.proxy)

    # Read in all the repos from the installation environment, make a note of which
    # are enabled, and then disable them all.  If the user gave us a method, we want
    # to use that instead of the default repos.
    self._base.read_all_repos()

    # Enable or disable updates.
    self.set_updates_enabled(self._updates_enabled)

    # Repo files are always loaded from the system.
    # When reloaded their state needs to be synchronized with the user configuration.
    # So we disable them now and enable them later if required.
    enabled = []

    with self._repos_lock:
        for repo in self._base.repos.iter_enabled():
            enabled.append(repo.id)
            repo.disable()

    # Add a new repo.
    if source_type not in SOURCE_REPO_FILE_TYPES:
        # Get the repo configuration of the first source.
        data = RepoConfigurationData.from_structure(
            self.proxy.GetRepoConfigurations()[0]
        )

        log.debug("Using the repo configuration: %s", data)

        # Get the URL.
        install_tree_url = data.url if data.type == URL_TYPE_BASEURL else ""
        mirrorlist = data.url if data.type == URL_TYPE_MIRRORLIST else ""
        metalink = data.url if data.type == URL_TYPE_METALINK else ""

        # Fallback to the installation root.
        base_repo_url = install_tree_url

        try:
            self._refresh_install_tree(data)
            self._base.conf.releasever = self._get_release_version(install_tree_url)
            base_repo_url = self._get_base_repo_location(install_tree_url)
            log.debug("releasever from %s is %s", base_repo_url,
                      self._base.conf.releasever)

            self._load_treeinfo_repositories(base_repo_url,
                                             disabled_treeinfo_repo_names,
                                             data)
        except configparser.MissingSectionHeaderError as e:
            # a broken .treeinfo is not fatal - we keep the fallback URL
            log.error("couldn't set releasever from base repo (%s): %s",
                      source_type, e)

        try:
            base_ksrepo = self.data.RepoData(
                name=constants.BASE_REPO_NAME,
                baseurl=base_repo_url,
                mirrorlist=mirrorlist,
                metalink=metalink,
                noverifyssl=not data.ssl_verification_enabled,
                proxy=data.proxy,
                sslcacert=data.ssl_configuration.ca_cert_path,
                sslclientcert=data.ssl_configuration.client_cert_path,
                sslclientkey=data.ssl_configuration.client_key_path
            )
            self._add_repo_to_dnf(base_ksrepo)
            self._fetch_md(base_ksrepo.name)
        except (MetadataError, PayloadError) as e:
            log.error("base repo (%s/%s) not valid -- removing it",
                      source_type, base_repo_url)
            log.error("reason for repo removal: %s", e)
            with self._repos_lock:
                self._base.repos.pop(constants.BASE_REPO_NAME, None)

            if not fallback:
                # without a fallback, disable everything and give up
                with self._repos_lock:
                    for repo in self._base.repos.iter_enabled():
                        self._disable_repo(repo.id)
                return

            # Fallback to the default source
            #
            # This is at the moment CDN on RHEL
            # and closest mirror everywhere else.
            tear_down_sources(self.proxy)

            source_type = conf.payload.default_source
            source_proxy = create_source(source_type)
            set_source(self.proxy, source_proxy)

            set_up_sources(self.proxy)

    # We need to check this again separately in case REPO_FILES were set above.
    if source_type in SOURCE_REPO_FILE_TYPES:
        # If this is a kickstart install, just return now as we normally do not
        # want to read the on media repo files in such a case. On the other hand,
        # the local repo files are a valid use case if the system is subscribed
        # and the CDN is selected as the installation source.
        if self.source_type == SOURCE_TYPE_CDN and is_module_available(SUBSCRIPTION):
            # only check if the Subscription module is available & CDN is the
            # installation source
            subscription_proxy = SUBSCRIPTION.get_proxy()
            load_cdn_repos = subscription_proxy.IsSubscriptionAttached
        else:
            # if the Subscription module is not available, we simply can't use
            # the CDN repos, making our decision here simple
            load_cdn_repos = False

        if flags.automatedInstall and not load_cdn_repos:
            return

        # Otherwise, fall back to the default repos that we disabled above
        with self._repos_lock:
            for (id_, repo) in self._base.repos.items():
                if id_ in enabled:
                    log.debug("repo %s: fall back enabled from default repos",
                              id_)
                    repo.enable()

    # Configure the additional (addon) repositories.
    for repo in self.addons:
        ksrepo = self.get_addon_repo(repo)

        if ksrepo.is_harddrive_based():
            ksrepo.baseurl = self._setup_harddrive_addon_repo(ksrepo)

        log.debug("repo %s: mirrorlist %s, baseurl %s, metalink %s",
                  ksrepo.name, ksrepo.mirrorlist, ksrepo.baseurl,
                  ksrepo.metalink)

        # one of these must be set to create new repo
        if not (ksrepo.mirrorlist or ksrepo.baseurl or ksrepo.metalink or
                ksrepo.name in self._base.repos):
            raise PayloadSetupError("Repository %s has no mirror, baseurl or "
                                    "metalink set and is not one of "
                                    "the pre-defined repositories" %
                                    ksrepo.name)

        self._add_repo_to_dnf(ksrepo)

    with self._repos_lock:
        # disable unnecessary repos
        for repo in self._base.repos.iter_enabled():
            id_ = repo.id
            if 'source' in id_ or 'debuginfo' in id_:
                self._disable_repo(id_)
            elif constants.isFinal and 'rawhide' in id_:
                self._disable_repo(id_)

    # fetch md for enabled repos
    enabled_repos = self._enabled_repos
    for repo_name in self.addons:
        if repo_name in enabled_repos:
            self._fetch_md(repo_name)
lambda signum, frame: exception.test_exception_handling()) signal.signal(signal.SIGUSR2, lambda signum, frame: anaconda.dumpState()) atexit.register(exitHandler, ksdata.reboot) from pyanaconda import exception anaconda.mehConfig = exception.initExceptionHandling(anaconda) # Start the subscription handling thread if the Subscription DBus module # provides enough authentication data. # - as kickstart only supports org + key authentication & nothing # else currently talks to the Subscription DBus module, # we only check if organization id & at least one activation # key are available from pyanaconda.modules.common.util import is_module_available from pyanaconda.modules.common.constants.services import SUBSCRIPTION if is_module_available(SUBSCRIPTION): from pyanaconda.ui.lib.subscription import org_keys_sufficient, register_and_subscribe if org_keys_sufficient(): threadMgr.add( AnacondaThread(name=constants.THREAD_SUBSCRIPTION, target=register_and_subscribe, args=[anaconda.payload])) # Fallback to default for interactive or for a kickstart with no installation method. fallback = not flags.automatedInstall \ or anaconda.payload.source_type == conf.payload.default_source payloadMgr.restart_thread(anaconda.payload, fallback=fallback) # initialize geolocation and start geolocation lookup if possible and enabled use_geoloc = startup_utils.check_if_geolocation_should_be_used(opts)