Example #1
    def execute(self, storage, ksdata, instClass, users, payload):
        """This method calls execute on all the registered addons."""
        for v in self.__dict__.values():
            if hasattr(v, "execute"):
                progress_message(N_("Executing %s addon") % v.name)
                if v.execute.__code__.co_argcount == 6:
                    v.execute(storage, ksdata, instClass, users, payload)
                else:
                    v.execute(storage, ksdata, instClass, users)
                    log.warning("Addon %s is using a deprecated method signature", v.name)
                    log.warning("Use execute(storage, ksdata, instClass, users, payload) instead")
Example #2
def checkCD(media, diskname):
	#
	# check that it is the right roll CD
	#
	found_disk = False
	while not found_disk:
		diskid = media.getId()

		if diskname == diskid:
			found_disk = True
		else:
			media.ejectCD()

			# XXX This should be interactive
			progress_message(_("Put Roll disk")
					+ " '%s' " % (diskname)
					+ _("in the drive\n"))
	return
Example #3
def wait_for_entropy(msg, desired_entropy, ksdata):
    """
    Show UI dialog/message for waiting for desired random data entropy.

    :param ksdata: kickstart data
    :type ksdata: pykickstart.base.BaseHandler
    :param desired_entropy: entropy level to wait for
    :type desired_entropy: int
    :returns: whether to force continuing regardless of the available entropy level
    :rtype: bool

    """

    if ksdata.displaymode.displayMode == DISPLAY_MODE_GRAPHICAL:
        # cannot import globally because GUI code may be missing for text mode
        # in some cases
        from pyanaconda.ui.gui.spokes.lib.entropy_dialog import run_entropy_dialog
        progress_message(_("The system needs more random data entropy"))
        return run_entropy_dialog(ksdata, desired_entropy)
    else:
        return _tui_wait(msg, desired_entropy)
Example #4
    def install(self):
        progress_message(N_('Starting package installation process'))

        # Add the rpm macros to the global transaction environment
        for macro in self.rpmMacros:
            rpm.addMacro(macro[0], macro[1])

        if self.install_device:
            self._setupMedia(self.install_device)
        try:
            self.checkSoftwareSelection()
            self._download_location = self._pick_download_location()
        except packaging.PayloadError as e:
            if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
                _failure_limbo()

        pkgs_to_download = self._base.transaction.install_set
        log.info('Downloading packages.')
        progressQ.send_message(_('Downloading packages'))
        progress = DownloadProgress()
        try:
            self._base.download_packages(pkgs_to_download, progress)
        except dnf.exceptions.DownloadError as e:
            msg = 'Failed to download the following packages: %s' % str(e)
            exc = packaging.PayloadInstallError(msg)
            if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                _failure_limbo()

        log.info('Downloading packages finished.')

        pre_msg = (N_("Preparing transaction from installation source"))
        progress_message(pre_msg)

        queue_instance = multiprocessing.Queue()
        process = multiprocessing.Process(target=do_transaction,
                                          args=(self._base, queue_instance))
        process.start()
        (token, msg) = queue_instance.get()
        while token not in ('post', 'quit'):
            if token == 'install':
                msg = _("Installing %s") % msg
                progressQ.send_message(msg)
            (token, msg) = queue_instance.get()

        if token == 'quit':
            _failure_limbo()

        post_msg = (N_("Performing post-installation setup tasks"))
        progress_message(post_msg)
        process.join()
        self._base.close()
        if os.path.exists(self._download_location):
            log.info("Cleaning up downloaded packages: %s", self._download_location)
            shutil.rmtree(self._download_location)
        else:
            # Some installation sources, such as NFS, don't need to download packages to
            # local storage, so the download location might not always exist. So for now
            # warn about this, at least until the RFE in bug 1193121 is implemented and
            # we don't have to care about clearing the download location ourselves.
            log.warning("Can't delete nonexistent download location: %s", self._download_location)
Example #5
def _prepare_configuration(payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION,
                                              subscription_dbus_tasks)
        configuration_queue.append(subscription_config)

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    user_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
Example #6
def _prepare_installation(payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    # Check for additional packages and run the payload's pre_install() step.
    pre_install.append(
        Task("Find additional packages & run pre_install()",
             payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def run_install_bootloader():
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
Example #7
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration", N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))

    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in security_dbus_tasks:
        task_proxy = SECURITY.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))

    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in services_dbus_tasks:
        task_proxy = SERVICES.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))

    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))

    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in localization_dbus_tasks:
        task_proxy = LOCALIZATION.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))

    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask(util.getSysroot())
    task_proxy = NETWORK.get_proxy(firewall_dbus_task)
    os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))

    os_config.append(Task("Configure X", ksdata.xconfig.execute))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration", N_("Writing network configuration"))
        network_config.append(Task("Network configuration",
                                   ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    user_config = TaskQueue("User creation", N_("Creating users"))

    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in users_dbus_tasks:
        task_proxy = USERS.get_proxy(dbus_task)
        user_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration", N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(Task("Configure Anaconda addons", ksdata.addons.execute, (storage, ksdata, None, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation", N_("Generating initramfs"))
    generate_initramfs.append(Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(Task("Write BTRFS bootloader fix", write_boot_loader, (storage, payload)))

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))

    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue("Realm join", N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts", N_("Running post-installation scripts"))
    post_scripts.append(Task("Run post installation scripts", runPostScripts, (ksdata.scripts,)))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts", N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning("Writing of the output kickstart to installed system has been disabled"
                    " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata,)))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                   x.name, next(task_completed_counter),
                                                                   x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
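
The counters used in the logging callbacks above come from util.item_counter, which is consumed with next() and evidently yields running "current/total" strings for the log messages. A minimal sketch of such a generator, assuming that behavior (the actual Anaconda helper may format its output differently):

def item_counter(total):
    """Yield progress strings such as '1/3', '2/3', '3/3'."""
    for i in range(1, total + 1):
        yield "%d/%d" % (i, total)

counter = item_counter(3)
print(next(counter))  # 1/3
print(next(counter))  # 2/3
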
Example #8
    def install(self):
        progress_message(N_('Starting package installation process'))

        # Add the rpm macros to the global transaction environment
        for macro in self.rpmMacros:
            rpm.addMacro(macro[0], macro[1])

        if self.install_device:
            self._setupMedia(self.install_device)
        try:
            self.checkSoftwareSelection()
            self._download_location = self._pick_download_location()
        except payload.PayloadError as e:
            if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
                log.error("Installation failed: %r", e)
                _failure_limbo()

        pkgs_to_download = self._base.transaction.install_set
        log.info('Downloading packages to %s.', self._download_location)
        progressQ.send_message(_('Downloading packages'))
        progress = DownloadProgress()
        try:
            self._base.download_packages(pkgs_to_download, progress)
        except dnf.exceptions.DownloadError as e:
            msg = 'Failed to download the following packages: %s' % str(e)
            exc = payload.PayloadInstallError(msg)
            if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                log.error("Installation failed: %r", exc)
                _failure_limbo()

        log.info('Downloading packages finished.')

        pre_msg = (N_("Preparing transaction from installation source"))
        progress_message(pre_msg)

        queue_instance = multiprocessing.Queue()
        process = multiprocessing.Process(target=do_transaction,
                                          args=(self._base, queue_instance))
        process.start()
        (token, msg) = queue_instance.get()
        # When the installation works correctly it will get 'install' updates
        # followed by a 'post' message and then a 'done' message.
        # If the installation fails it will send 'quit' or 'error' instead.
        while token:
            if token == 'install':
                msg = _("Installing %s") % msg
                progressQ.send_message(msg)
            elif token == 'configure':
                msg = _("Configuring %s") % msg
                progressQ.send_message(msg)
            elif token == 'verify':
                msg = _("Verifying %s") % msg
                progressQ.send_message(msg)
            elif token == 'log':
                log.info(msg)
            elif token == 'post':
                msg = (N_("Performing post-installation setup tasks"))
                progressQ.send_message(msg)
            elif token == 'done':
                break  # Installation finished successfully
            elif token == 'quit':
                msg = ("Payload error - DNF installation has ended up abruptly: %s" % msg)
                raise payload.PayloadError(msg)
            elif token == 'error':
                exc = payload.PayloadInstallError("DNF error: %s" % msg)
                if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                    log.error("Installation failed: %r", exc)
                    _failure_limbo()
            (token, msg) = queue_instance.get()

        process.join()
        self._base.close()
        if os.path.exists(self._download_location):
            log.info("Cleaning up downloaded packages: %s", self._download_location)
            shutil.rmtree(self._download_location)
        else:
            # Some installation sources, such as NFS, don't need to download packages to
            # local storage, so the download location might not always exist. So for now
            # warn about this, at least until the RFE in bug 1193121 is implemented and
            # we don't have to care about clearing the download location ourselves.
            log.warning("Can't delete nonexistent download location: %s", self._download_location)
Example #9
    def install(self):
        progress_message(N_('Starting package installation process'))

        # Add the rpm macros to the global transaction environment
        for macro in self.rpmMacros:
            rpm.addMacro(macro[0], macro[1])

        if self.install_device:
            self._setupMedia(self.install_device)
        try:
            self.checkSoftwareSelection()
            self._download_location = self._pick_download_location()
        except packaging.PayloadError as e:
            if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
                _failure_limbo()

        pkgs_to_download = self._base.transaction.install_set
        log.info('Downloading packages.')
        progressQ.send_message(_('Downloading packages'))
        progress = DownloadProgress()
        try:
            self._base.download_packages(pkgs_to_download, progress)
        except dnf.exceptions.DownloadError as e:
            msg = 'Failed to download the following packages: %s' % str(e)
            exc = packaging.PayloadInstallError(msg)
            if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                _failure_limbo()

        log.info('Downloading packages finished.')

        pre_msg = (N_("Preparing transaction from installation source"))
        progress_message(pre_msg)

        queue_instance = multiprocessing.Queue()
        process = multiprocessing.Process(target=do_transaction,
                                          args=(self._base, queue_instance))
        process.start()
        (token, msg) = queue_instance.get()
        while token not in ('post', 'quit'):
            if token == 'install':
                msg = _("Installing %s") % msg
                progressQ.send_message(msg)
            (token, msg) = queue_instance.get()

        if token == 'quit':
            _failure_limbo()

        post_msg = (N_("Performing post-installation setup tasks"))
        progress_message(post_msg)
        process.join()
        self._base.close()
        if os.path.exists(self._download_location):
            log.info("Cleaning up downloaded packages: %s",
                     self._download_location)
            shutil.rmtree(self._download_location)
        else:
            # Some installation sources, such as NFS, don't need to download packages to
            # local storage, so the download location might not always exist. So for now
            # warn about this, at least until the RFE in bug 1193121 is implemented and
            # we don't have to care about clearing the download location ourselves.
            log.warning("Can't delete nonexistent download location: %s",
                        self._download_location)
Example #10
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willRunRealmd = ksdata.realm.join_realm
    willInstallBootloader = not flags.flags.dirInstall and (not ksdata.bootloader.disabled
                                                            and ksdata.bootloader != "none")

    # First save system time to HW clock.
    if flags.can_touch_runtime_system("save system time to HW clock"):
        timezone.save_hw_clock(ksdata.timezone)

    # We really only care about actions that affect filesystems, since
    # those are the ones that take the most time.
    steps = len(storage.devicetree.findActions(action_type="create", object_type="format")) + \
            len(storage.devicetree.findActions(action_type="resize", object_type="format"))

    # pre setup phase, post install
    steps += 2

    # realmd, maybe
    if willRunRealmd:
        steps += 1

    # bootloader, maybe
    if willInstallBootloader:
        steps += 1

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        progress_init(steps+1)

        with progress_report(_("Waiting for %s threads to finish") % (threadMgr.running-1)):
            map(log.debug, ("Thread %s is running" % n for n in threadMgr.names))
            threadMgr.wait_all()
    else:
        progress_init(steps)

    with progress_report(_("Setting up the installation environment")):
        ksdata.firstboot.setup(storage, ksdata, instClass)
        ksdata.addons.setup(storage, ksdata, instClass)

    storage.updateKSData()  # this puts custom storage info into ksdata

    # Do partitioning.
    payload.preStorage()

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            create_format_post=step_clbk,
                                                            resize_format_pre=message_clbk,
                                                            resize_format_post=step_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)

    turnOnFilesystems(storage, mountOnly=flags.flags.dirInstall, callbacks=callbacks_reg)
    write_storage_late = (flags.flags.livecdInstall or ksdata.ostreesetup.seen
                          or ksdata.method.method == "liveimg")
    if not write_storage_late and not flags.flags.dirInstall:
        storage.write()

    # STACKI
    with open('/proc/cmdline', 'r') as f:
        args = f.readline().split()

    if 'boss' in args:
        import subprocess
        import os

        #
        # if we are a boss, then download the selected rolls
        #
        log.debug('STACKI: Downloading pallets: start')
        bossenv = os.environ.copy()
        bossenv['LD_LIBRARY_PATH'] = '/opt/stack/lib'
        bossenv['DISPLAY'] = ':1'
        s = subprocess.Popen('/opt/stack/bin/boss_download_pallets.py',
            env = bossenv)
        s.wait()
        log.debug('STACKI: Downloading pallets: complete')

        payload.reset()

        #
        # log the repositories that are currently configured
        #
        for repo in payload._yum.repos.repos.values():
            log.debug('STACKI: repo.id (%s)' % repo.id)
            # if repo.id not in [ 'stacki', 'os' ]:
                # log.debug('STACKI: disabling repo repo.id (%s)' % repo.id)
                # payload.disableRepo(repo.id)
    else:
        #
        # need to setup a symbolic link in order to store all the packages
        # downloaded by lighttpd
        #
        import os

        cmd = 'rm -rf /install ; '
        cmd += 'mkdir -p /mnt/sysimage/install ; '
        cmd += 'ln -s /mnt/sysimage/install /install'
        os.system(cmd)

    # STACKI

    # Do packaging.

    # Discover information about realms to join,
    # to determine additional packages
    if willRunRealmd:
        with progress_report(_("Discovering realm to join")):
            ksdata.realm.setup()

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        network.copyFileToPath("/etc/resolv.conf", iutil.getSysroot())

    # Check for additional packages
    ksdata.authconfig.setup()
    ksdata.firewall.setup()

    # anaconda requires storage packages in order to make sure the target
    # system is bootable and configurable, and some other packages in order
    # to finish setting up the system.
    packages = storage.packages + ksdata.realm.packages
    packages += ksdata.authconfig.packages + ksdata.firewall.packages

    if willInstallBootloader:
        packages += storage.bootloader.packages

    if network.is_using_team_device():
        packages.append("teamd")

    # don't try to install packages from the install class' ignored list and the
    # explicitly excluded ones (user takes the responsibility)
    packages = [p for p in packages
                if p not in instClass.ignoredPackages and p not in ksdata.packages.excludedList]
    payload.preInstall(packages=packages, groups=payload.languageGroups())
    payload.install()

    if write_storage_late and not flags.flags.dirInstall:
        if iutil.getSysroot() != iutil.getTargetPhysicalRoot():
            blivet.setSysroot(iutil.getTargetPhysicalRoot(),
                              iutil.getSysroot())

            # Now that we have the FS layout in the target, umount
            # things that were in the legacy sysroot, and put them in
            # the target root, except for the physical /.  First,
            # unmount all target filesystems.
            storage.umountFilesystems()

            # Explicitly mount the root on the physical sysroot
            rootmnt = storage.mountpoints.get('/')
            rootmnt.setup()
            rootmnt.format.setup(options=rootmnt.format.options, chroot=iutil.getTargetPhysicalRoot())

            payload.prepareMountTargets(storage)

            # Everything else goes in the target root, including /boot
            # since the bootloader code will expect to find /boot
            # inside the chroot.
            storage.mountFilesystems(skipRoot=True)
        storage.write()

    # Do bootloader.
    if willInstallBootloader:
        with progress_report(_("Installing boot loader")):
            writeBootLoader(storage, payload, instClass, ksdata)

    with progress_report(_("Performing post-installation setup tasks")):
        payload.postInstall()

    progress_complete()
Example #11
def downloadRoll(roll):
	(rollname, rollversion, rollarch, rollurl, diskid) = roll

	progress_message( _("Downloading Roll") + 
			" '%s' " % (rollname))

	#
	# test if this roll is in rocks format
	#
	isrocksroll = 1

	u = string.split(rollurl, '/')
	if len(u) > 2 and u[2] == '127.0.0.1':
		#
		# all CDs and DVDs will have the loopback IP address as the
		# host name, so let's see if a specific directory exists
		# that will indicate to us if this is a rocks roll or
		# a 'foreign' roll
		#
		p = os.path.join('/mnt/cdrom', rollname, rollversion, rollarch)
		if not os.path.exists(p):
			isrocksroll = 0

	path = os.path.join(rollname, rollversion, rollarch)

	cmd = '/opt/rocks/bin/rocks report distro'
	for line in os.popen(cmd).readlines():
		distrodir = line[:-1]

	localpath = '%s/%s/rolls/%s' % (iutil.getSysroot(), distrodir, path)

	if isrocksroll:
		url = '%s' % os.path.join(rollurl, path)
	else:
		#
		# this is not a rocks roll, so append the keywords 'RedHat'
		# and 'RPMS'  onto the local directory name. this allows us
		# to use CentOS and Scientific Linux CDs
		#
		localpath = os.path.join(localpath, 'RedHat', 'RPMS')

		#
		# change the url to point to the RPMS directory
		#
		cdtree = rocks.file.Tree('/mnt/cdrom')
		dirpath = ''
		for dir in cdtree.getDirs():
			d = string.split(dir, '/')
			if d[-1] == 'RPMS':
				dirpath = dir
				break
		
		url = os.path.join('http://127.0.0.1/mnt/cdrom', dirpath)

	cutdirs = len(string.split(url[7:], '/'))
	if isrocksroll:
		#
		# for rolls in rocks format, make sure we copy all the
		# files from the roll (e.g., 'RedHat' and 'base' directories)
		# this is useful for the kernel roll.
		#
		cutdirs -= 1

	subprocess.call('mkdir -p %s' % localpath, shell=True)
	os.chdir(localpath)

	if os.path.exists('/tmp/updates/rocks/bin/wget'):
		wget = '/tmp/updates/rocks/bin/wget'
	else:
		wget = '/usr/bin/wget'

	#
	# add resiliency flags to wget
	#
	flags = '--dns-timeout=3 --connect-timeout=3 --read-timeout=10 '
	flags += '--tries=3'

	cmd = '%s -m -nv -np -nH %s --cut-dirs=%d %s' \
		% (wget, flags, cutdirs, url)
	cmd += ' >> /tmp/wget.debug'
	subprocess.call(cmd, shell=True)

	subprocess.call('echo "%s" >> /tmp/wget.debug' % (cmd), shell=True)

	return
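
The --cut-dirs value passed to wget is computed by splitting everything after the leading 'http://' (url[7:]) on '/'; for a roll in rocks format the code then subtracts one so the roll's own directory tree is preserved locally. A small illustration with a hypothetical roll URL:

# Hypothetical URL; equivalent to string.split(url[7:], '/') in the Python 2 original.
url = 'http://127.0.0.1/mnt/cdrom/kernel/7.0/x86_64'
cutdirs = len(url[7:].split('/'))
print(cutdirs)  # 6: the host plus five directory components
# a roll in rocks format subtracts one, so wget keeps the roll's own
# directory tree (for example its RedHat and base subdirectories)
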
Example #12
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willInstallBootloader = not flags.flags.dirInstall and (not ksdata.bootloader.disabled
                                                            and ksdata.bootloader != "none")

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    installation_queue.queue_completed.connect(lambda x: progress_step("%s -- DONE" % x.status_message))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue("Wait for threads to finish",
                                     N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(Task("Wait for all threads to finish", wait_for_all_threads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if flags.can_touch_runtime_system("save system time to HW clock"):
        # let's just do this as a top-level task
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock, (ksdata.timezone,))
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue("Installation environment setup", N_("Setting up the installation environment"))
    setup_environment.append(Task("Setup firstboot", ksdata.firstboot.setup, (ksdata, instClass)))
    setup_environment.append(Task("Setup addons", ksdata.addons.setup, (storage, ksdata, instClass, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration", N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(Task("Insert custom storage to ksdata", storage.update_ksdata))

    # pre-storage tasks
    # - Is this actually needed? It does not appear to do anything right now.
    early_storage.append(Task("Run pre-storage tasks", payload.preStorage))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            create_format_post=step_clbk,
                                                            resize_format_pre=message_clbk,
                                                            resize_format_post=step_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)

    early_storage.append(Task("Activate filesystems",
                              task=turn_on_filesystems,
                              task_args=(storage,),
                              task_kwargs={"mount_only": flags.flags.dirInstall, "callbacks": callbacks_reg}))

    early_storage.append(Task("Write early storage", payload.writeStorageEarly))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts", N_("Running pre-installation scripts"))
    pre_install_scripts.append(Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts,)))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    if ksdata.realm.join_realm:
        realm_discover = TaskQueue("Realm discover", N_("Discovering realm to join"))
        realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
        installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks", N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authconfig", ksdata.authconfig.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(Task("Setup timezone", ksdata.timezone.setup, (ksdata,)))

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy /etc/resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.preInstall()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        packages = storage.packages + ksdata.realm.packages
        packages += ksdata.authconfig.packages + ksdata.firewall.packages + ksdata.network.packages

        if willInstallBootloader:
            packages += storage.bootloader.packages

        # don't try to install packages from the install class' ignored list and the
        # explicitly excluded ones (user takes the responsibility)
        packages = [p for p in packages
                    if p not in instClass.ignoredPackages and p not in ksdata.packages.excludedList]
        payload.preInstall(packages=packages, groups=payload.languageGroups())
    pre_install.append(Task("Find additional packages & run preInstall()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    late_storage = TaskQueue("Late storage configuration", N_("Configuring storage"))
    late_storage.append(Task("Write late storage", payload.writeStorageLate))
    installation_queue.append(late_storage)

    # Do bootloader.
    if willInstallBootloader:
        bootloader_install = TaskQueue("Bootloader installation", N_("Installing boot loader"))
        bootloader_install.append(Task("Install bootloader", writeBootLoader, (storage, payload, instClass, ksdata)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks", (N_("Performing post-installation setup tasks")))
    post_install.append(Task("Run post-installation setup tasks", payload.postInstall))
    installation_queue.append(post_install)

    # notify progress tracking about the number of steps
    progress_init(len(installation_queue))
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = iutil.item_counter(installation_queue.queue_count)
    task_started_counter = iutil.item_counter(installation_queue.task_count)
    task_completed_counter = iutil.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                  x.name, next(task_completed_counter),
                                                                  x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
Example #13
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willInstallBootloader = not flags.flags.dirInstall and (
        not ksdata.bootloader.disabled and ksdata.bootloader != "none")

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.queue_completed.connect(
        lambda x: progress_step("%s -- DONE" % x.status_message))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if flags.can_touch_runtime_system("save system time to HW clock"):
        # let's just do this as a top-level task
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock, (ksdata.timezone, ))
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup firstboot", ksdata.firstboot.setup, (ksdata, instClass)))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup,
             (storage, ksdata, instClass, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata", storage.update_ksdata))

    # pre-storage tasks
    # - Is this actually needed? It does not appear to do anything right now.
    early_storage.append(Task("Run pre-storage tasks", payload.preStorage))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        create_format_post=step_clbk,
        resize_format_pre=message_clbk,
        resize_format_post=step_clbk,
        wait_for_entropy=entropy_wait_clbk)

    early_storage.append(
        Task("Activate filesystems",
             task=turn_on_filesystems,
             task_args=(storage, ),
             task_kwargs={
                 "mount_only": flags.flags.dirInstall,
                 "callbacks": callbacks_reg
             }))

    early_storage.append(Task("Write early storage",
                              payload.writeStorageEarly))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    if ksdata.realm.join_realm:
        realm_discover = TaskQueue("Realm discover",
                                   N_("Discovering realm to join"))
        realm_discover.append(
            Task("Discover realm to join", ksdata.realm.setup))
        installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authconfig", ksdata.authconfig.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy /etc/resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.preInstall()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        packages = storage.packages + ksdata.realm.packages
        packages += ksdata.authconfig.packages + ksdata.firewall.packages + ksdata.network.packages

        if willInstallBootloader:
            packages += storage.bootloader.packages

        # don't try to install packages from the install class' ignored list and the
        # explicitly excluded ones (user takes the responsibility)
        packages = [
            p for p in packages if p not in instClass.ignoredPackages
            and p not in ksdata.packages.excludedList
        ]
        payload.preInstall(packages=packages, groups=payload.languageGroups())

    pre_install.append(
        Task("Find additional packages & run preInstall()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    late_storage = TaskQueue("Late storage configuration",
                             N_("Configuring storage"))
    late_storage.append(Task("Write late storage", payload.writeStorageLate))
    installation_queue.append(late_storage)

    # Do bootloader.
    if willInstallBootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", writeBootLoader,
                 (storage, payload, instClass, ksdata)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.postInstall))
    installation_queue.append(post_install)

    # notify progress tracking about the number of steps
    progress_init(len(installation_queue))
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = iutil.item_counter(installation_queue.queue_count)
    task_started_counter = iutil.item_counter(installation_queue.task_count)
    task_completed_counter = iutil.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
Beispiel #14
0
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willRunRealmd = ksdata.realm.join_realm
    willInstallBootloader = not flags.flags.dirInstall and not ksdata.bootloader.disabled

    # First save system time to HW clock.
    if flags.can_touch_runtime_system("save system time to HW clock"):
        timezone.save_hw_clock(ksdata.timezone)

    # We really only care about actions that affect filesystems, since
    # those are the ones that take the most time.
    steps = len(storage.devicetree.findActions(action_type="create", object_type="format")) + \
            len(storage.devicetree.findActions(action_type="resize", object_type="format"))

    # Update every 10% of packages installed.  We don't know how many packages
    # we are installing until it's too late (see realmd later on) so this is
    # the best we can do.
    steps += 10

    # pre setup phase, post install
    steps += 2

    # realmd, maybe
    if willRunRealmd:
        steps += 1

    # bootloader, maybe
    if willInstallBootloader:
        steps += 1

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        progress_init(steps + 1)

        with progress_report(
                _("Waiting for %s threads to finish") %
            (threadMgr.running - 1)):
            for n in threadMgr.names:
                log.debug("Thread %s is running", n)
            threadMgr.wait_all()
    else:
        progress_init(steps)

    with progress_report(_("Setting up the installation environment")):
        ksdata.firstboot.setup(storage, ksdata, instClass)
        ksdata.addons.setup(storage, ksdata, instClass)

    storage.updateKSData()  # this puts custom storage info into ksdata

    # Do partitioning.
    payload.preStorage()

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        create_format_post=step_clbk,
        resize_format_pre=message_clbk,
        resize_format_post=step_clbk,
        wait_for_entropy=entropy_wait_clbk)

    turnOnFilesystems(storage,
                      mountOnly=flags.flags.dirInstall,
                      callbacks=callbacks_reg)
    write_storage_late = (flags.flags.livecdInstall or ksdata.ostreesetup.seen
                          or ksdata.method.method == "liveimg")
    if not write_storage_late and not flags.flags.dirInstall:
        storage.write()

    # STACKIQ
    # read the kernel command line to decide between the boss and client paths
    with open('/proc/cmdline', 'r') as cmdline:
        args = cmdline.readline().split()

    if 'boss' in args:
        import subprocess

        #
        # if we are a boss, then download the selected rolls
        #
        log.debug('STACKI: Downloading pallets: start')
        s = subprocess.Popen('/opt/stack/bin/boss_download_pallets.py')
        s.wait()
        log.debug('STACKI: Downloading pallets: complete')

        payload.reset()

        #
        # disable every repository except the 'stacki' repo
        #
        for repo in payload._yum.repos.repos.values():
            log.debug('STACKI: repo.id (%s)' % repo.id)
            if repo.id != 'stacki':
                log.debug('STACKI: disabling repo repo.id (%s)' % repo.id)
                payload.disableRepo(repo.id)
    else:
        #
        # need to setup a symbolic link in order to store all the packages
        # downloaded by lighttpd
        #
        import os

        cmd = 'rm -rf /install ; '
        cmd += 'mkdir -p /mnt/sysimage/install ; '
        cmd += 'ln -s /mnt/sysimage/install /install'
        os.system(cmd)

    # STACKIQ

    # Do packaging.

    # Discover information about realms to join,
    # to determine additional packages
    if willRunRealmd:
        with progress_report(_("Discovering realm to join")):
            ksdata.realm.setup()

    # Check for additional packages
    ksdata.authconfig.setup()
    ksdata.firewall.setup()

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        network.copyFileToPath("/etc/resolv.conf", iutil.getSysroot())

    # anaconda requires storage packages in order to make sure the target
    # system is bootable and configurable, and some other packages in order
    # to finish setting up the system.
    packages = storage.packages + ksdata.realm.packages
    packages += ksdata.authconfig.packages + ksdata.firewall.packages

    if not ksdata.bootloader.disabled:
        packages += storage.bootloader.packages

    if network.is_using_team_device:
        packages.append("teamd")

    # don't try to install packages from the install class' ignored list and the
    # explicitly excluded ones (user takes the responsibility)
    packages = [
        p for p in packages if p not in instClass.ignoredPackages
        and p not in ksdata.packages.excludedList
    ]
    payload.preInstall(packages=packages, groups=payload.languageGroups())
    payload.install()

    if write_storage_late and not flags.flags.dirInstall:
        if iutil.getSysroot() != iutil.getTargetPhysicalRoot():
            blivet.setSysroot(iutil.getTargetPhysicalRoot(),
                              iutil.getSysroot())
            storage.write()

            # Now that we have the FS layout in the target, umount
            # things that were in the legacy sysroot, and put them in
            # the target root, except for the physical /.  First,
            # unmount all target filesystems.
            storage.umountFilesystems()

            # Explicitly mount the root on the physical sysroot
            rootmnt = storage.mountpoints.get('/')
            rootmnt.setup()
            rootmnt.format.setup(options=rootmnt.format.options,
                                 chroot=iutil.getTargetPhysicalRoot())

            payload.prepareMountTargets(storage)

            # Everything else goes in the target root, including /boot
            # since the bootloader code will expect to find /boot
            # inside the chroot.
            storage.mountFilesystems(skipRoot=True)
        else:
            storage.write()

    # Do bootloader.
    if willInstallBootloader:
        with progress_report(_("Installing boot loader")):
            writeBootLoader(storage, payload, instClass, ksdata)

    with progress_report(_("Performing post-installation setup tasks")):
        payload.postInstall()

    progress_complete()
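
The STACKIQ block above decides between the boss and client code paths by scanning /proc/cmdline for the word 'boss'. A minimal, self-contained restatement of that check; the helper name has_cmdline_flag is ours, not part of anaconda or stacki:

def has_cmdline_flag(flag, cmdline_path='/proc/cmdline'):
    """Return True if the given word appears on the kernel command line."""
    # /proc/cmdline is a single line of whitespace-separated arguments
    with open(cmdline_path, 'r') as cmdline:
        args = cmdline.readline().split()
    return flag in args

# has_cmdline_flag('boss')  ->  True on a boss node, False on a client
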
Beispiel #15
0
 def execute(self, storage, ksdata, users, payload):
     """This method calls execute on all the registered addons."""
     for v in self.__dict__.values():
         if hasattr(v, "execute"):
             progress_message(N_("Executing %s addon") % v.name)
             v.execute(storage, ksdata, users, payload)
Beispiel #16
0
def _prepare_configuration(storage, payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        overwrite = isinstance(payload, LiveImagePayload)
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    user_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload,
                  LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))

    configuration_queue.append(generate_initramfs)

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
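
_prepare_configuration() only builds the queue; it does not start it. Assuming the driver pattern used elsewhere in this listing (progress_init, logging the queue summary, then start() and progress_complete()), a caller could look roughly like the sketch below. The function name run_configuration is hypothetical, and the helpers it calls are the same module-level ones used throughout these examples:

def run_configuration(storage, payload, ksdata):
    """Hypothetical driver for the queue built above (sketch only)."""
    queue = _prepare_configuration(storage, payload, ksdata)

    # tell progress tracking how many steps to expect and log the layout
    progress_init(queue.task_count)
    log.info(queue.summary)

    # run all scheduled configuration tasks, then mark the progress done
    queue.start()
    progress_complete()
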
Beispiel #17
0
def _prepare_installation(payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue("Wait for threads to finish",
                                     N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue("Installation environment setup", N_("Setting up the installation environment"))
    setup_environment.append(Task(
        "Setup addons",
        ksdata.addons.setup,
        (None, ksdata, payload)
    ))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration", N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.needs_storage_configuration:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts", N_("Running pre-installation scripts"))
    pre_install_scripts.append(Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts,)))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks", N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY, [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'], reason="compliance")

        payload.requirements.add_groups(payload.language_groups(), reason="language groups")
        payload.requirements.add_packages(payload.langpacks(), reason="langpacks", strong=False)

        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [SECURITY, NETWORK, TIMEZONE, STORAGE, SUBSCRIPTION]
        for module in modules_with_package_requirements:
            # Skip unavailable modules.
            if not is_module_available(module):
                continue

            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module, module_requirements)
            payload.requirements.add_requirements(module_requirements)

        payload.pre_install()

    pre_install.append(Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration", N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation", N_("Installing boot loader"))

    def configure_bootloader():
        boot_task = bootloader_proxy.ConfigureWithTask(payload.kernel_version_list)
        sync_run_task(STORAGE.get_proxy(boot_task))

    if not payload.handles_bootloader_configuration:
        # FIXME: This is a temporary workaround, run the DBus task directly.
        bootloader_install.append(Task("Configure the bootloader", configure_bootloader))

    bootloader_install.append_dbus_tasks(STORAGE, [bootloader_proxy.InstallWithTask()])
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks", (N_("Performing post-installation setup tasks")))
    post_install.append(Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots", N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
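
The examples above and below lean heavily on a queue-of-queues pattern: a TaskQueue holds Tasks and nested TaskQueues, exposes task_count/queue_count, and fires queue_started/task_started/task_completed signals. The toy model below only captures that shape for illustration; it is not pyanaconda's actual implementation, which does considerably more:

import time


class _Signal(object):
    """Minimal signal: every connected handler is called on emit()."""
    def __init__(self):
        self._handlers = []

    def connect(self, handler):
        self._handlers.append(handler)

    def emit(self, *args):
        for handler in self._handlers:
            handler(*args)


class Task(object):
    """A named callable with optional arguments and a recorded run time."""
    def __init__(self, name, task, task_args=(), task_kwargs=None):
        self.name = name
        self._task = task
        self._args = task_args
        self._kwargs = task_kwargs or {}
        self.elapsed_time = 0.0

    def run(self):
        start = time.time()
        self._task(*self._args, **self._kwargs)
        self.elapsed_time = time.time() - start


class TaskQueue(object):
    """A named list of Tasks and nested TaskQueues with simple signals."""
    def __init__(self, name, status_message=None):
        self.name = name
        self.status_message = status_message
        self._items = []
        self.queue_started = _Signal()
        self.task_started = _Signal()
        self.task_completed = _Signal()

    def append(self, item):
        self._items.append(item)

    @property
    def task_count(self):
        return sum(i.task_count if isinstance(i, TaskQueue) else 1
                   for i in self._items)

    @property
    def queue_count(self):
        # this queue plus every nested queue
        return 1 + sum(i.queue_count for i in self._items
                       if isinstance(i, TaskQueue))

    def start(self):
        self.queue_started.emit(self)
        for item in self._items:
            if isinstance(item, TaskQueue):
                # forward nested events so top-level handlers see everything
                item.queue_started.connect(self.queue_started.emit)
                item.task_started.connect(self.task_started.emit)
                item.task_completed.connect(self.task_completed.emit)
                item.start()
            else:
                self.task_started.emit(item)
                item.run()
                self.task_completed.emit(item)

# Example wiring, mirroring the driver code in the listings above:
#   queue = TaskQueue("Installation queue")
#   queue.task_completed.connect(lambda t: print("done:", t.name))
#   queue.append(Task("Sleep a bit", time.sleep, (0.1,)))
#   queue.start()
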
Beispiel #19
0
def RocksGetRolls():
    #
    # download the selected rolls
    #

    #
    # if there isn't a /tmp/rolls.xml file or if this is a client install,
    # then there are no rolls to fetch -- so return
    #
    media = rocks.media.Media()

    if not os.path.exists('/tmp/rolls.xml'):
        if media.mounted():
            media.ejectCD()
        return

    #
    # in a default installation, make sure /export points to a partition
    # *not* on the '/' partition
    #
    cwd = os.getcwd()
    os.chdir(iutil.getSysroot())

    if not os.path.exists('state'):
        os.mkdir('state')
    if not os.path.exists('state/partition1'):
        os.mkdir('state/partition1')

    try:
        os.symlink('state/partition1', 'export')
    except:
        pass
    os.chdir(cwd)

    #
    # get the roll list by parsing /tmp/rolls.xml
    #
    generator = rocks.roll.Generator()
    generator.parse('/tmp/rolls.xml')

    cwd = os.getcwd()

    #
    # get all the CD-based rolls first
    #
    diskids = []
    for roll in generator.rolls:
        (name, version, arch, url, diskid) = roll

        if diskid != '' and diskid != 'None' and diskid not in diskids:
            diskids.append(diskid)

    diskids.sort()
    for d in diskids:
        #
        # ask the user to put the right media in the bay
        #
        checkCD(media, d)

        #
        # then, for each selected roll on this disk, copy it
        #
        for roll in generator.rolls:
            (name, version, arch, url, diskid) = roll
            if diskid == d:
                downloadRoll(roll)

    if media.mounted():
        media.ejectCD()

    #
    # now get all the network rolls
    #
    for roll in generator.rolls:
        (rollname, rollversion, rollarch, rollurl, diskid) = roll

        if diskid != 'None':
            continue

        downloadRoll(roll)

    os.chdir(cwd)

    #
    # rebuild the distro
    #
    progress_message(_("Rebuilding the Rocks Distribution..."))

    cmd = '/opt/rocks/bin/rocks report distro'
    for line in os.popen(cmd).readlines():
        distrodir = line[:-1]

    rootdir = '%s/%s' % (iutil.getSysroot(), distrodir)

    path = ''
    try:
        path = os.environ['PATH']
    except:
        pass

    #
    # need this path so rocks-dist uses the 'rpm' binary from the rocks
    # path and not the 'busybox' rpm
    #
    os.environ['PATH'] = '/mnt/runtime/rocks/bin:' + path

    installcgi = rocks.installcgi.InstallCGI(rootdir)
    installcgi.createPopt()
    installcgi.rebuildDistro(generator.rolls)

    #
    # make sure the link to the repository is correct for the package
    # installation portion of the install
    #
    arch = os.uname()[4]
    if arch in ['i386', 'i486', 'i586', 'i686']:
        arch = 'i386'

    subprocess.call('umount /mnt/cdrom', shell=True)
    subprocess.call('rm -rf /mnt/cdrom', shell=True)
    dir = '/mnt/sysimage/%s/rocks-dist/%s/' % (distrodir, arch)
    subprocess.call('ln -s %s /mnt/cdrom' % (dir), shell=True)

    return
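
The CD loop in RocksGetRolls() above effectively groups the selected rolls by disk ID and then prompts for each disk once. The helper below restates that grouping on its own; it is illustrative only and assumes the (name, version, arch, url, diskid) tuple layout used above:

def rolls_by_disk(rolls):
    """Group roll tuples by disk ID, skipping network rolls ('' or 'None')."""
    by_disk = {}
    for name, version, arch, url, diskid in rolls:
        if diskid in ('', 'None'):
            continue
        by_disk.setdefault(diskid, []).append((name, version, arch, url, diskid))
    return by_disk

# for diskid in sorted(rolls_by_disk(generator.rolls)):
#     checkCD(media, diskid)          # prompt for the right disk once
#     ...then download every roll that lives on that disk...
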
Beispiel #20
0
def doConfiguration(storage, payload, ksdata, instClass):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.queue_completed.connect(
        lambda x: progress_step("%s -- DONE" % x.status_message))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(
        Task("Configure authselect", ksdata.authselect.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure SELinux", ksdata.selinux.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure first boot tasks", ksdata.firstboot.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure services", ksdata.services.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure keyboard", ksdata.keyboard.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure timezone", ksdata.timezone.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure language", ksdata.lang.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure firewall", ksdata.firewall.execute,
             (storage, ksdata, instClass)))
    os_config.append(
        Task("Configure X", ksdata.xconfig.execute,
             (storage, ksdata, instClass)))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    will_write_network = not flags.flags.imageInstall and not flags.flags.dirInstall
    if will_write_network:
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", ksdata.network.execute,
                 (storage, ksdata, instClass)))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(
        Task("Configure root", ksdata.rootpw.execute,
             (storage, ksdata, instClass, u)))
    user_config.append(
        Task("Configure user groups", ksdata.group.execute,
             (storage, ksdata, instClass, u)))
    user_config.append(
        Task("Configure user", ksdata.user.execute,
             (storage, ksdata, instClass, u)))
    user_config.append(
        Task("Configure SSH key", ksdata.sshkey.execute,
             (storage, ksdata, instClass, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, instClass, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreateInitrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)
    bootloader_enabled = not ksdata.bootloader.disabled and ksdata.bootloader.location != "none"
    if flags.flags.livecdInstall and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", writeBootLoader,
                 (storage, payload, instClass, ksdata)))
    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue(
            "Realm join",
            N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(
            Task("Join a realm", ksdata.realm.execute,
                 (storage, ksdata, instClass)))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generate image or directory contents.
    if flags.flags.imageInstall:
        log.info(
            "Not writing out user interaction config file due to image install mode."
        )
    elif flags.flags.dirInstall:
        log.info(
            "Not writing out user interaction config file due to directory install mode."
        )
    else:
        write_configs.append(
            Task("Store user interaction config",
                 screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(len(configuration_queue))
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
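
The util.item_counter generators above are only ever consumed with next() and logged as a "(%s)" suffix, which suggests they yield "current/total" strings. A plausible minimal implementation under that assumption; this is a sketch, not pyanaconda's helper:

def item_counter(total):
    """Yield "1/total", "2/total", ... as progress-style log suffixes."""
    if total < 0:
        raise ValueError("total must be non-negative")
    for current in range(1, total + 1):
        yield "%d/%d" % (current, total)

# counter = item_counter(3)
# next(counter)  # '1/3'
# next(counter)  # '2/3'
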
Beispiel #21
0
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue("Wait for threads to finish",
                                     N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue("Installation environment setup", N_("Setting up the installation environment"))
    setup_environment.append(Task("Setup addons", ksdata.addons.setup, (storage, ksdata, instClass, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration", N_("Configuring storage"))

    # put custom storage info into ksdata, but not if just assigning mount points
    manual_part_proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)

    if not manual_part_proxy.Enabled:
        early_storage.append(Task("Insert custom storage to ksdata", storage.update_ksdata))

    # pre-storage tasks
    # - Is this actually needed? It does not appear to do anything right now.
    early_storage.append(Task("Run pre-storage tasks", payload.preStorage))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            resize_format_pre=message_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)
    if conf.target.is_directory:
        early_storage.append(Task("Mount filesystems",
                                  task=storage.mount_filesystems))
    else:
        early_storage.append(Task("Activate filesystems",
                                  task=turn_on_filesystems,
                                  task_args=(storage,),
                                  task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(Task("Write early storage", payload.writeStorageEarly))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts", N_("Running pre-installation scripts"))
    pre_install_scripts.append(Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts,)))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    realm_discover = TaskQueue("Realm discover", N_("Discovering realm to join"))
    realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
    installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks", N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authselect", ksdata.authselect.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(Task("Setup timezone", ksdata.timezone.setup, (ksdata,)))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy /resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.preInstall()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.realm.packages, reason="realm")
        payload.requirements.add_packages(ksdata.authselect.packages, reason="authselect")
        payload.requirements.add_packages(ksdata.firewall.packages, reason="firewall")
        payload.requirements.add_packages(ksdata.network.packages, reason="network")
        payload.requirements.add_packages(ksdata.timezone.packages, reason="ntp", strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages, reason="bootloader")
        payload.requirements.add_groups(payload.languageGroups(), reason="language groups")
        payload.requirements.add_packages(payload.langpacks(), reason="langpacks", strong=False)
        payload.preInstall()

    pre_install.append(Task("Find additional packages & run preInstall()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    late_storage = TaskQueue("Late storage configuration", N_("Configuring storage"))
    late_storage.append(Task("Write late storage", payload.writeStorageLate))
    installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation", N_("Installing boot loader"))
        bootloader_install.append(Task("Install bootloader", writeBootLoader, (storage, payload, instClass, ksdata)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks", (N_("Performing post-installation setup tasks")))
    post_install.append(Task("Run post-installation setup tasks", payload.postInstall))
    installation_queue.append(post_install)

    # Create snapshot
    if ksdata.snapshot and ksdata.snapshot.has_snapshot(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots", N_("Creating snapshots"))
        snapshot_creation.append(Task("Create post-install snapshots", ksdata.snapshot.execute, (storage, ksdata, instClass)))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(installation_queue.task_count)
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(installation_queue.queue_count)
    task_started_counter = util.item_counter(installation_queue.task_count)
    task_completed_counter = util.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                  x.name, next(task_completed_counter),
                                                                  x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
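
Unlike the older examples that build a flat packages list, run_pre_install() above registers each need with payload.requirements, tagging it with a reason and an optional strong flag. The toy tracker below only illustrates that bookkeeping idea; it is not pyanaconda's actual requirements class:

class ToyRequirements(object):
    """Illustrative stand-in for a requirements tracker (sketch only)."""
    def __init__(self):
        self._packages = []   # (name, reason, strong) triples
        self._groups = []

    def add_packages(self, names, reason, strong=True):
        self._packages.extend((name, reason, strong) for name in names)

    def add_groups(self, names, reason, strong=True):
        self._groups.extend((name, reason, strong) for name in names)

    def summary(self):
        return {"packages": list(self._packages), "groups": list(self._groups)}

# reqs = ToyRequirements()
# reqs.add_packages(["grub2"], reason="bootloader")          # made-up names
# reqs.add_packages(["langpacks-en"], reason="langpacks", strong=False)
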
Beispiel #22
0
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willRunRealmd = ksdata.realm.join_realm
    willInstallBootloader = not flags.flags.dirInstall and (not ksdata.bootloader.disabled
                                                            and ksdata.bootloader.location != "none")

    # First save system time to HW clock.
    if flags.can_touch_runtime_system("save system time to HW clock"):
        timezone.save_hw_clock(ksdata.timezone)

    # We really only care about actions that affect filesystems, since
    # those are the ones that take the most time.
    steps = len(storage.devicetree.findActions(action_type="create", object_type="format")) + \
            len(storage.devicetree.findActions(action_type="resize", object_type="format"))

    # Update every 10% of packages installed.  We don't know how many packages
    # we are installing until it's too late (see realmd later on) so this is
    # the best we can do.
    steps += 11

    # pre setup phase, post install
    steps += 2

    # realmd, maybe
    if willRunRealmd:
        steps += 1

    # bootloader, maybe
    if willInstallBootloader:
        steps += 1

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        progress_init(steps+1)

        with progress_report(N_("Waiting for %s threads to finish") % (threadMgr.running-1)):
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()
    else:
        progress_init(steps)

    with progress_report(N_("Setting up the installation environment")):
        ksdata.firstboot.setup(storage, ksdata, instClass)
        ksdata.addons.setup(storage, ksdata, instClass)

    storage.updateKSData()  # this puts custom storage info into ksdata

    # Do partitioning.
    payload.preStorage()

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            create_format_post=step_clbk,
                                                            resize_format_pre=message_clbk,
                                                            resize_format_post=step_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)

    turnOnFilesystems(storage, mountOnly=flags.flags.dirInstall, callbacks=callbacks_reg)
    payload.writeStorageEarly()

    # Run %pre-install scripts with the filesystem mounted and no packages
    with progress_report(N_("Running pre-installation scripts")):
        runPreInstallScripts(ksdata.scripts)

    # Do packaging.

    # Discover information about realms to join,
    # to determine additional packages
    if willRunRealmd:
        with progress_report(N_("Discovering realm to join")):
            ksdata.realm.setup()

    # Check for additional packages
    ksdata.authconfig.setup()
    ksdata.firewall.setup()
    ksdata.network.setup()
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    ksdata.timezone.setup(ksdata)

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        network.copyFileToPath("/etc/resolv.conf", iutil.getSysroot())

    # anaconda requires storage packages in order to make sure the target
    # system is bootable and configurable, and some other packages in order
    # to finish setting up the system.
    packages = storage.packages + ksdata.realm.packages
    packages += ksdata.authconfig.packages + ksdata.firewall.packages + ksdata.network.packages

    if willInstallBootloader:
        packages += storage.bootloader.packages

    # don't try to install packages from the install class' ignored list and the
    # explicitly excluded ones (user takes the responsibility)
    packages = [p for p in packages
                if p not in instClass.ignoredPackages and p not in ksdata.packages.excludedList]
    payload.preInstall(packages=packages, groups=payload.languageGroups())
    payload.install()

    payload.writeStorageLate()

    # Do bootloader.
    if willInstallBootloader:
        with progress_report(N_("Installing boot loader")):
            writeBootLoader(storage, payload, instClass, ksdata)

    with progress_report(N_("Performing post-installation setup tasks")):
        payload.postInstall()

    progress_complete()
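
Several examples wrap whole phases in "with progress_report(...)". Assuming it reports the message when the phase starts and advances the step counter when the phase ends, it could be sketched with contextlib as below; this is a guess at the behaviour, with stubbed progress helpers, not pyanaconda's actual implementation:

from contextlib import contextmanager

def progress_message(msg):      # stub for the real status-message helper
    print("status:", msg)

def progress_step(msg):         # stub: the real helper advances the bar
    print("step done:", msg)

@contextmanager
def progress_report(message):
    progress_message(message)   # announce the phase
    try:
        yield
    finally:
        progress_step(message)  # count it as a completed step, even on error

# with progress_report("Installing boot loader"):
#     ...write the boot loader...
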
Beispiel #23
0
    def install(self):
        progress_message(N_('Starting package installation process'))

        # Add the rpm macros to the global transaction environment
        for macro in self.rpmMacros:
            rpm.addMacro(macro[0], macro[1])

        if self.install_device:
            self._setupMedia(self.install_device)
        try:
            self.checkSoftwareSelection()
            self._download_location = self._pick_download_location()
        except payload.PayloadError as e:
            if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
                log.error("Installation failed: %r", e)
                _failure_limbo()

        pkgs_to_download = self._base.transaction.install_set
        log.info('Downloading packages to %s.', self._download_location)
        progressQ.send_message(_('Downloading packages'))
        progress = DownloadProgress()
        try:
            self._base.download_packages(pkgs_to_download, progress)
        except dnf.exceptions.DownloadError as e:
            msg = 'Failed to download the following packages: %s' % str(e)
            exc = payload.PayloadInstallError(msg)
            if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                log.error("Installation failed: %r", exc)
                _failure_limbo()

        log.info('Downloading packages finished.')

        pre_msg = (N_("Preparing transaction from installation source"))
        progress_message(pre_msg)

        queue_instance = multiprocessing.Queue()
        process = multiprocessing.Process(target=do_transaction,
                                          args=(self._base, queue_instance))
        process.start()
        (token, msg) = queue_instance.get()
        # When the installation works correctly it will get 'install' updates
        # followed by a 'post' message and then a 'quit' message.
        # If the installation fails it will send 'quit' without 'post'
        while token:
            if token == 'install':
                msg = _("Installing %s") % msg
                progressQ.send_message(msg)
            elif token == 'configure':
                msg = _("Configuring %s") % msg
                progressQ.send_message(msg)
            elif token == 'verify':
                msg = _("Verifying %s") % msg
                progressQ.send_message(msg)
            elif token == 'log':
                log.info(msg)
            elif token == 'post':
                msg = (N_("Performing post-installation setup tasks"))
                progressQ.send_message(msg)
            elif token == 'done':
                break  # Installation finished successfully
            elif token == 'quit':
                msg = (
                    "Payload error - DNF installation has ended up abruptly: %s"
                    % msg)
                raise payload.PayloadError(msg)
            elif token == 'error':
                exc = payload.PayloadInstallError("DNF error: %s" % msg)
                if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                    log.error("Installation failed: %r", exc)
                    _failure_limbo()
            (token, msg) = queue_instance.get()

        process.join()
        self._base.close()
        if os.path.exists(self._download_location):
            log.info("Cleaning up downloaded packages: %s",
                     self._download_location)
            shutil.rmtree(self._download_location)
        else:
            # Some installation sources, such as NFS, don't need to download packages to
            # local storage, so the download location might not always exist. So for now
            # warn about this, at least until the RFE in bug 1193121 is implemented and
            # we don't have to care about clearing the download location ourselves.
            log.warning("Can't delete nonexistent download location: %s",
                        self._download_location)
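
The install() loop above is the consumer half of a simple (token, message) protocol over a multiprocessing.Queue; do_transaction is the producer running in a separate process. Based only on the tokens the loop handles, a producer could look roughly like the sketch below. It is illustrative, with made-up package names, and is not the real do_transaction:

def fake_do_transaction(base, queue_instance):
    """Post the same token sequence install() above expects (sketch only)."""
    try:
        for pkg in ("bash", "kernel"):                # stand-in package names
            queue_instance.put(('install', pkg))
            queue_instance.put(('configure', pkg))
            queue_instance.put(('verify', pkg))
        queue_instance.put(('log', 'transaction finished'))
        queue_instance.put(('post', None))            # post-installation phase
        queue_instance.put(('done', None))            # success, consumer breaks
    except Exception as e:                            # pylint: disable=broad-except
        queue_instance.put(('error', str(e)))
        queue_instance.put(('quit', str(e)))          # abrupt end, consumer raises

# queue_instance = multiprocessing.Queue()
# process = multiprocessing.Process(target=fake_do_transaction,
#                                   args=(None, queue_instance))
# process.start()
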
Beispiel #24
0
def downloadRoll(roll):
    (rollname, rollversion, rollarch, rollurl, diskid) = roll

    progress_message(_("Downloading Roll") + " '%s' " % (rollname))

    #
    # test if this roll is in rocks format
    #
    isrocksroll = 1

    u = string.split(rollurl, '/')
    if len(u) > 2 and u[2] == '127.0.0.1':
        #
        # all CDs and DVDs will have the loopback IP address as the
        # host name, so let's see if a specific directory exists
        # that will indicate to us if this is a rocks roll or
        # a 'foreign' roll
        #
        p = os.path.join('/mnt/cdrom', rollname, rollversion, rollarch)
        if not os.path.exists(p):
            isrocksroll = 0

    path = os.path.join(rollname, rollversion, rollarch)

    cmd = '/opt/rocks/bin/rocks report distro'
    for line in os.popen(cmd).readlines():
        distrodir = line[:-1]

    localpath = '%s/%s/rolls/%s' % (iutil.getSysroot(), distrodir, path)

    if isrocksroll:
        url = '%s' % os.path.join(rollurl, path)
    else:
        #
        # this is not a rocks roll, so append the keywords 'RedHat'
        # and 'RPMS'  onto the local directory name. this allows us
        # to use CentOS and Scientific Linux CDs
        #
        localpath = os.path.join(localpath, 'RedHat', 'RPMS')

        #
        # change the url to point to the RPMS directory
        #
        cdtree = rocks.file.Tree('/mnt/cdrom')
        dirpath = ''
        for dir in cdtree.getDirs():
            d = string.split(dir, '/')
            if d[-1] == 'RPMS':
                dirpath = dir
                break

        url = os.path.join('http://127.0.0.1/mnt/cdrom', dirpath)

    cutdirs = len(string.split(url[7:], '/'))
    if isrocksroll:
        #
        # for rolls in rocks format, make sure we copy all the
        # files from the roll (e.g., 'RedHat' and 'base' directories)
        # this is useful for the kernel roll.
        #
        cutdirs -= 1

    subprocess.call('mkdir -p %s' % localpath, shell=True)
    os.chdir(localpath)

    if os.path.exists('/tmp/updates/rocks/bin/wget'):
        wget = '/tmp/updates/rocks/bin/wget'
    else:
        wget = '/usr/bin/wget'

    #
    # add resiliency flags to wget
    #
    flags = '--dns-timeout=3 --connect-timeout=3 --read-timeout=10 '
    flags += '--tries=3'

    cmd = '%s -m -nv -np -nH %s --cut-dirs=%d %s' \
        % (wget, flags, cutdirs, url)
    cmd += ' >> /tmp/wget.debug'
    subprocess.call(cmd, shell=True)

    subprocess.call('echo "%s" >> /tmp/wget.debug' % (cmd), shell=True)

    return
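The --cut-dirs value above is simply the number of '/'-separated components (host included) in the URL once the leading 'http://' is stripped; subtracting one for rolls in rocks format makes wget cut exactly the URL's directory levels, so the roll's own top-level directories ('RedHat', 'base', ...) are mirrored directly into localpath. A small sketch with a made-up frontend URL shows the arithmetic:

def wget_cut_dirs(url, isrocksroll):
    """Reproduce the cut-dirs computation from downloadRoll()."""
    cutdirs = len(url[7:].split('/'))   # 'http://' stripped: host + path components
    if isrocksroll:
        cutdirs -= 1                    # keep the roll's top-level directories
    return cutdirs


# hypothetical URL for a rocks-format kernel roll served by the frontend
url = 'http://10.1.1.1/install/rolls/kernel/7.0/x86_64'
print(wget_cut_dirs(url, True))    # -> 5: contents of x86_64/ (RedHat/, base/, ...) land in localpath
print(wget_cut_dirs(url, False))   # -> 6: one more level is cut, flattening away the RedHat/ level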
Beispiel #25
0
def doConfiguration(storage, payload, ksdata, instClass):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration", N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure SELinux", ksdata.selinux.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure first boot tasks", ksdata.firstboot.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure services", ksdata.services.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure language", ksdata.lang.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure firewall", ksdata.firewall.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure X", ksdata.xconfig.execute, (storage, ksdata, instClass)))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration", N_("Writing network configuration"))
        network_config.append(Task("Network configuration",
                                   ksdata.network.execute, (storage, payload, ksdata, instClass)))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(Task("Configure root", ksdata.rootpw.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure user groups", ksdata.group.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure user", ksdata.user.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure SSH key", ksdata.sshkey.execute, (storage, ksdata, instClass, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration", N_("Configuring addons"))
    addon_config.append(Task("Configure Anaconda addons", ksdata.addons.execute, (storage, ksdata, instClass, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation", N_("Generating initramfs"))
    generate_initramfs.append(Task("Generate initramfs", payload.recreateInitrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(Task("Write BTRFS bootloader fix", writeBootLoader, (storage, payload, instClass, ksdata)))
    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue("Realm join", N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute, (storage, ksdata, instClass)))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts", N_("Running post-installation scripts"))
    post_scripts.append(Task("Run post installation scripts", runPostScripts, (ksdata.scripts,)))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts", N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning("Writing of the output kickstart to installed system has been disabled"
                    " by the nosave option.")
    else:
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata,)))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generated image or directory contents.
    if conf.target.is_image:
        log.info("Not writing out user interaction config file due to image install mode.")
    elif conf.target.is_directory:
        log.info("Not writing out user interaction config file due to directory install mode.")
    else:
        write_configs.append(Task("Store user interaction config", screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                   x.name, next(task_completed_counter),
                                                                   x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
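The queue and task logging above numbers items by pulling strings from generator-based counters (util.item_counter). As a rough illustration of that pattern, here is a minimal stand-in, assuming the real helper yields 'current/total' strings (the actual implementation lives in anaconda's utility code and may differ):

def item_counter(total):
    """Yield 'current/total' strings, one per next() call."""
    current = 0
    while current < total:
        current += 1
        yield "%d/%d" % (current, total)


counter = item_counter(3)
print(next(counter))   # 1/3
print(next(counter))   # 2/3
print(next(counter))   # 3/3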
Beispiel #26
0
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willRunRealmd = ksdata.realm.join_realm
    willInstallBootloader = not flags.flags.dirInstall and (
        not ksdata.bootloader.disabled and ksdata.bootloader != "none")

    # First save system time to HW clock.
    if flags.can_touch_runtime_system("save system time to HW clock"):
        timezone.save_hw_clock(ksdata.timezone)

    # We really only care about actions that affect filesystems, since
    # those are the ones that take the most time.
    steps = len(storage.devicetree.actions.find(action_type="create", object_type="format")) + \
            len(storage.devicetree.actions.find(action_type="resize", object_type="format"))

    # Update every 10% of packages installed.  We don't know how many packages
    # we are installing until it's too late (see realmd later on) so this is
    # the best we can do.
    steps += 11

    # pre setup phase, post install
    steps += 2

    # realmd, maybe
    if willRunRealmd:
        steps += 1

    # bootloader, maybe
    if willInstallBootloader:
        steps += 1

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        progress_init(steps + 1)

        with progress_report(
                N_("Waiting for %s threads to finish") %
            (threadMgr.running - 1)):
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()
    else:
        progress_init(steps)

    with progress_report(N_("Setting up the installation environment")):
        ksdata.firstboot.setup(storage, ksdata, instClass)
        ksdata.addons.setup(storage, ksdata, instClass, payload)

    storage.update_ksdata()  # this puts custom storage info into ksdata

    # Do partitioning.
    payload.preStorage()

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        create_format_post=step_clbk,
        resize_format_pre=message_clbk,
        resize_format_post=step_clbk,
        wait_for_entropy=entropy_wait_clbk)

    turn_on_filesystems(storage,
                        mount_only=flags.flags.dirInstall,
                        callbacks=callbacks_reg)
    payload.writeStorageEarly()

    # Run %pre-install scripts with the filesystem mounted and no packages
    with progress_report(N_("Running pre-installation scripts")):
        runPreInstallScripts(ksdata.scripts)

    # Do packaging.

    # Discover information about realms to join,
    # to determine additional packages
    if willRunRealmd:
        with progress_report(N_("Discovering realm to join")):
            ksdata.realm.setup()

    # Check for additional packages
    ksdata.authconfig.setup()
    ksdata.firewall.setup()
    ksdata.network.setup()
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    ksdata.timezone.setup(ksdata)

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        network.copyFileToPath("/etc/resolv.conf", iutil.getSysroot())

    # anaconda requires storage packages in order to make sure the target
    # system is bootable and configurable, and some other packages in order
    # to finish setting up the system.
    packages = storage.packages + ksdata.realm.packages
    packages += ksdata.authconfig.packages + ksdata.firewall.packages + ksdata.network.packages

    if willInstallBootloader:
        packages += storage.bootloader.packages

    # don't try to install packages from the install class' ignored list and the
    # explicitly excluded ones (user takes the responsibility)
    packages = [
        p for p in packages if p not in instClass.ignoredPackages
        and p not in ksdata.packages.excludedList
    ]
    payload.preInstall(packages=packages, groups=payload.languageGroups())
    payload.install()

    payload.writeStorageLate()

    # Do bootloader.
    if willInstallBootloader:
        with progress_report(N_("Installing boot loader")):
            writeBootLoader(storage, payload, instClass, ksdata)

    with progress_report(N_("Performing post-installation setup tasks")):
        payload.postInstall()

    progress_complete()
Beispiel #27
0
def doInstall(storage, payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue("Wait for threads to finish",
                                     N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(Task("Wait for all threads to finish", wait_for_all_threads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task - no need to create a whole task queue for it
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue("Installation environment setup", N_("Setting up the installation environment"))
    setup_environment.append(Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration", N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(Task("Insert custom storage to ksdata",
                              task=update_storage_ksdata,
                              task_args=(storage, ksdata)))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            resize_format_pre=message_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)
    if not conf.target.is_directory:
        early_storage.append(Task("Activate filesystems",
                                  task=turn_on_filesystems,
                                  task_args=(storage,),
                                  task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(Task("Mount filesystems", task=storage.mount_filesystems))

    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(Task("Write early storage",
                                  task=write_storage_configuration,
                                  task_args=(storage,)))

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts", N_("Running pre-installation scripts"))
    pre_install_scripts.append(Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts,)))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    realm_discover = TaskQueue("Realm discover", N_("Discovering realm to join"))
    realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
    installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks", N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authselect", ksdata.authselect.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(Task("Setup timezone", ksdata.timezone.setup, (ksdata,)))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.realm.packages, reason="realm")
        payload.requirements.add_packages(ksdata.authselect.packages, reason="authselect")
        payload.requirements.add_packages(ksdata.firewall.packages, reason="firewall")
        payload.requirements.add_packages(ksdata.network.packages, reason="network")
        payload.requirements.add_packages(ksdata.timezone.packages, reason="ntp", strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages, reason="bootloader")
        if flags.flags.cmdline.getbool("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'], reason="compliance")

        payload.requirements.add_groups(payload.language_groups(), reason="language groups")
        payload.requirements.add_packages(payload.langpacks(), reason="langpacks", strong=False)
        payload.pre_install()

    pre_install.append(Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration", N_("Configuring storage"))
        late_storage.append(Task("Prepare mount targets",
                                 task=payload.prepare_mount_targets,
                                 task_args=(storage, )))

        if not conf.target.is_directory:
            late_storage.append(Task("Write late storage",
                                     task=write_storage_configuration,
                                     task_args=(storage, )))

        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation", N_("Installing boot loader"))
        bootloader_install.append(Task("Install bootloader", write_boot_loader, (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks", (N_("Performing post-installation setup tasks")))
    post_install.append(Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots", N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests, SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(installation_queue.task_count)
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(installation_queue.queue_count)
    task_started_counter = util.item_counter(installation_queue.task_count)
    task_completed_counter = util.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                  x.name, next(task_completed_counter),
                                                                  x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
Beispiel #28
0
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willRunRealmd = ksdata.realm.join_realm
    willInstallBootloader = not flags.flags.dirInstall and (
        not ksdata.bootloader.disabled and ksdata.bootloader != "none")

    # First save system time to HW clock.
    if flags.can_touch_runtime_system("save system time to HW clock"):
        timezone.save_hw_clock(ksdata.timezone)

    # We really only care about actions that affect filesystems, since
    # those are the ones that take the most time.
    steps = len(storage.devicetree.findActions(action_type="create", object_type="format")) + \
            len(storage.devicetree.findActions(action_type="resize", object_type="format"))

    # pre setup phase, pre install, post install
    steps += 3

    # realmd, maybe
    if willRunRealmd:
        steps += 1

    # bootloader, maybe
    if willInstallBootloader:
        steps += 1

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        progress_init(steps + 1)

        with progress_report(
                _("Waiting for %s threads to finish") %
            (threadMgr.running - 1)):
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()
    else:
        progress_init(steps)

    with progress_report(_("Setting up the installation environment")):
        ksdata.firstboot.setup(storage, ksdata, instClass)
        ksdata.addons.setup(storage, ksdata, instClass, payload)

    storage.updateKSData()  # this puts custom storage info into ksdata

    # Do partitioning.
    payload.preStorage()

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        create_format_post=step_clbk,
        resize_format_pre=message_clbk,
        resize_format_post=step_clbk,
        wait_for_entropy=entropy_wait_clbk)

    turnOnFilesystems(storage,
                      mountOnly=flags.flags.dirInstall,
                      callbacks=callbacks_reg)
    write_storage_late = (flags.flags.livecdInstall or ksdata.ostreesetup.seen
                          or ksdata.method.method == "liveimg")
    if not write_storage_late and not flags.flags.dirInstall:
        storage.write()

    # Run %pre-install scripts with the filesystem mounted and no packages
    with progress_report(_("Running pre-installation scripts")):
        runPreInstallScripts(ksdata.scripts)

    # Do packaging.

    # Discover information about realms to join,
    # to determine additional packages
    if willRunRealmd:
        with progress_report(_("Discovering realm to join")):
            ksdata.realm.setup()

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        network.copyFileToPath("/etc/resolv.conf", iutil.getSysroot())

    # Check for additional packages
    ksdata.authconfig.setup()
    ksdata.firewall.setup()
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    ksdata.timezone.setup(ksdata)

    # anaconda requires storage packages in order to make sure the target
    # system is bootable and configurable, and some other packages in order
    # to finish setting up the system.
    packages = storage.packages + ksdata.realm.packages
    packages += ksdata.authconfig.packages + ksdata.firewall.packages

    if willInstallBootloader:
        packages += storage.bootloader.packages

    if network.is_using_team_device():
        packages.append("teamd")

    # don't try to install packages from the install class' ignored list and the
    # explicitly excluded ones (user takes the responsibility)
    packages = [
        p for p in packages if p not in instClass.ignoredPackages
        and p not in ksdata.packages.excludedList
    ]
    payload.preInstall(packages=packages, groups=payload.languageGroups())
    payload.install()

    if write_storage_late and not flags.flags.dirInstall:
        if iutil.getSysroot() != iutil.getTargetPhysicalRoot():
            blivet.setSysroot(iutil.getTargetPhysicalRoot(),
                              iutil.getSysroot())

            # Now that we have the FS layout in the target, umount
            # things that were in the legacy sysroot, and put them in
            # the target root, except for the physical /.  First,
            # unmount all target filesystems.
            storage.umountFilesystems()

            # Explicitly mount the root on the physical sysroot
            rootmnt = storage.mountpoints.get('/')
            rootmnt.setup()
            rootmnt.format.setup(options=rootmnt.format.options,
                                 chroot=iutil.getTargetPhysicalRoot())

            payload.prepareMountTargets(storage)

            # Everything else goes in the target root, including /boot
            # since the bootloader code will expect to find /boot
            # inside the chroot.
            storage.mountFilesystems(skipRoot=True)
        storage.write()

    # Do bootloader.
    if willInstallBootloader:
        with progress_report(_("Installing boot loader")):
            writeBootLoader(storage, payload, instClass, ksdata)

    with progress_report(_("Performing post-installation setup tasks")):
        payload.postInstall()

    progress_complete()