    def test_empty_task_queue(self):
        """Check that an empty task queue works correctly."""
        # first check if empty task queue works correctly
        task_queue = TaskQueue("foo", status_message="foo status message")
        assert task_queue.name == "foo"
        assert task_queue.status_message == "foo status message"
        assert task_queue.task_count == 0
        assert task_queue.queue_count == 0
        assert task_queue.summary == dedent("""
            Top-level task queue: foo
            Number of task queues: 0
            Number of tasks: 0
            Task & task group listing:
        """).strip()

        # connect started/completed callbacks
        task_queue.started.connect(self._set_var_4)
        task_queue.completed.connect(self._set_var_5)

        # it should be possible to start an empty task queue
        task_queue.start()

        # check state after the run
        assert task_queue.task_count == 0
        assert task_queue.queue_count == 0

        # started/completed signals should still be triggered, even
        # if the queue is empty
        assert self._test_variable4 is task_queue
        assert self._test_variable5 is task_queue

        # the nested queue/task signals should not be triggered if
        # the queue is empty
        assert self._test_variable6 is None
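The _set_var_* callbacks and _test_variableN attributes used above are not part of this snippet. Judging from the assertions, each callback simply stores the value it receives on the test instance; a minimal sketch of what such helpers could look like (hypothetical, for illustration only):

    # Hypothetical test-class helpers: each one records the argument passed by
    # the signal so the assertions can inspect it after the queue has run.
    def _set_var_4(self, value):
        self._test_variable4 = value

    def _set_var_5(self, value):
        self._test_variable5 = value

    def _set_var_6(self, value):
        self._test_variable6 = value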
Example #2
def run_installation(payload, ksdata):
    """Run the complete installation."""
    queue = TaskQueue("Complete installation queue")
    queue.append(_prepare_installation(payload, ksdata))
    queue.append(_prepare_configuration(payload, ksdata))

    # notify progress tracking about the number of steps
    progress_init(queue.task_count)

    # log contents of the main task queue
    log.info(queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(queue.queue_count)
    task_started_counter = util.item_counter(queue.task_count)
    task_completed_counter = util.item_counter(queue.task_count)
    queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))

    # start the task queue
    queue.start()

    # done
    progress_complete()
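The "(n/total)" counters in the logging callbacks come from util.item_counter. Its implementation is not shown here; assuming it is a generator that yields "1/N", "2/N", ... strings, a minimal sketch would be:

def item_counter(total):
    """Yield progress strings "1/N", "2/N", ... up to "N/N" (sketch only)."""
    count = 0
    while count < total:
        count += 1
        yield "%d/%d" % (count, total)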
Example #3
    def test_empty_task_queue(self):
        """Check that an empty task queue works correctly."""
        # first check if empty task queue works correctly
        task_queue = TaskQueue("foo", status_message="foo status message")
        assert task_queue.name == "foo"
        assert task_queue.status_message == "foo status message"
        assert task_queue.task_count == 0
        assert task_queue.queue_count == 0
        assert task_queue.current_task_number is None
        assert task_queue.current_queue_number is None
        assert task_queue.progress == 0.0
        assert not task_queue.running
        assert not task_queue.done
        assert len(task_queue.summary) > 0
        # connect started/completed callbacks

        # these should be triggered
        task_queue.started.connect(self._set_var_4)
        task_queue.completed.connect(self._set_var_5)
        # these should not
        should_not_run = lambda x: self._set_var_6("anaconda")
        task_queue.task_started.connect(should_not_run)
        task_queue.task_completed.connect(should_not_run)
        task_queue.queue_started.connect(should_not_run)
        task_queue.queue_completed.connect(should_not_run)

        # it should be possible to start an empty task queue
        task_queue.start()
        # check state after the run
        assert not task_queue.running
        assert task_queue.done
        assert task_queue.current_queue_number is None
        assert task_queue.current_task_number is None
        assert task_queue.task_count == 0
        assert task_queue.queue_count == 0
        # started/completed signals should still be triggered, even
        # if the queue is empty
        assert self._test_variable4 is task_queue
        assert self._test_variable5 is task_queue
        # the nested queue/task signals should not be triggered if
        # the queue is empty
        assert self._test_variable6 is None
Example #4
    def empty_task_queue_test(self):
        """Check that an empty task queue works correctly."""
        # first check if empty task queue works correctly
        task_queue = TaskQueue("foo", status_message="foo status message")
        self.assertEqual(task_queue.name, "foo")
        self.assertEqual(task_queue.status_message, "foo status message")
        self.assertEqual(task_queue.task_count, 0)
        self.assertEqual(task_queue.queue_count, 0)
        self.assertIsNone(task_queue.current_task_number)
        self.assertIsNone(task_queue.current_queue_number)
        self.assertEqual(task_queue.progress, 0.0)
        self.assertFalse(task_queue.running)
        self.assertFalse(task_queue.done)
        self.assertGreater(len(task_queue.summary), 0)
        # connect started/completed callbacks

        # these should be triggered
        task_queue.started.connect(self._set_var_4)
        task_queue.completed.connect(self._set_var_5)
        # these should not
        should_not_run = lambda x: self._set_var_6("anaconda")
        task_queue.task_started.connect(should_not_run)
        task_queue.task_completed.connect(should_not_run)
        task_queue.queue_started.connect(should_not_run)
        task_queue.queue_completed.connect(should_not_run)

        # it should be possible to start an empty task queue
        task_queue.start()
        # check state after the run
        self.assertFalse(task_queue.running)
        self.assertTrue(task_queue.done)
        self.assertIsNone(task_queue.current_queue_number)
        self.assertIsNone(task_queue.current_task_number)
        self.assertEqual(task_queue.task_count, 0)
        self.assertEqual(task_queue.queue_count, 0)
        # started/completed signals should still be triggered, even
        # if the queue is empty
        self.assertIs(self._test_variable4, task_queue)
        self.assertIs(self._test_variable5, task_queue)
        # the nested queue/task signals should not be triggered if
        # the queue is empty
        self.assertIsNone(self._test_variable6)
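Both tests above rely on the started/completed and task_*/queue_* attributes behaving like simple synchronous signals: connect() registers a callback, and the queue later calls every registered callback with the emitting object as the argument. A minimal stand-in with that contract (a sketch, not the real signal class used by TaskQueue):

class Signal(object):
    """Minimal synchronous signal: connect() registers callbacks, emit()
    calls them in registration order with the given arguments (sketch)."""

    def __init__(self):
        self._callbacks = []

    def connect(self, callback):
        self._callbacks.append(callback)

    def emit(self, *args, **kwargs):
        for callback in self._callbacks:
            callback(*args, **kwargs)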
Example #5
    def _run_installation(self, payload, ksdata):
        """Run the complete installation."""
        # before building the install task queue make
        # sure no background processing threads are running and
        # the Anaconda internal state is thus final
        self._wait_for_threads_to_finish()

        queue = TaskQueue("Complete installation queue")
        queue.append(self._prepare_installation(payload, ksdata))
        queue.append(self._prepare_configuration(payload, ksdata))

        # Set the progress reporting callback of the payload class.
        # FIXME: This is a temporary workaround.
        payload._progress_cb = lambda step, msg: progress_message(msg)

        # Set the progress reporting callback of the DBus tasks.
        # FIXME: This is a temporary workaround.
        for item in queue.nested_items:
            if isinstance(item, DBusTask):
                item._progress_cb = lambda step, msg: progress_message(msg)

        # notify progress tracking about the number of steps
        progress_init(queue.task_count)

        # log contents of the main task queue
        log.info(queue.summary)

        # log tasks and queues when they are started
        # - note that we are using generators to add the counter
        queue_counter = util.item_counter(queue.queue_count)
        task_started_counter = util.item_counter(queue.task_count)
        task_completed_counter = util.item_counter(queue.task_count)
        queue.queue_started.connect(lambda x: log.info(
            "Queue started: %s (%s)", x.name, next(queue_counter)))
        queue.task_started.connect(lambda x: log.info(
            "Task started: %s (%s)", x.name, next(task_started_counter)))
        queue.task_completed.connect(
            lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                                next(task_completed_counter), x.elapsed_time))

        # start the task queue
        queue.start()

        # done
        progress_complete()
        # this message is automatically detected by QE tools, do not change it lightly
        log.info("All tasks in the installation queue are done. "
                 "Installation successfully finished.")
Example #6
def _prepare_installation(payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.type == PAYLOAD_TYPE_DNF:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    # Set up FIPS for the payload installation.
    fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
    pre_install.append_dbus_tasks(SECURITY, [fips_task])

    # Install the payload.
    pre_install.append(
        Task("Find additional packages & run pre_install()",
             payload.pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if payload.type != PAYLOAD_TYPE_DNF:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def run_install_bootloader():
        tasks = bootloader_proxy.InstallBootloaderWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    bootloader_install.append(
        Task("Install bootloader", run_install_bootloader))
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
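The append_dbus_tasks() calls above wrap DBus task object paths so they can run inside a TaskQueue. The older doConfiguration example further down spells the same pattern out by hand: get a proxy for each returned task path and wrap it in a Task that runs sync_run_task. A hypothetical helper with that assumed behaviour:

def append_dbus_tasks(queue, module, dbus_task_paths):
    # For every DBus task object path, create a task proxy and wrap it in a
    # Task that runs the remote task synchronously via sync_run_task.
    # (Sketch of the assumed behaviour; the real API is a TaskQueue method.)
    for task_path in dbus_task_paths:
        task_proxy = module.get_proxy(task_path)
        queue.append(Task(task_proxy.Name, sync_run_task, (task_proxy,)))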
Example #7
def _prepare_configuration(payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    timezone_proxy = TIMEZONE.get_proxy()
    timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Localization DBus module
    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask()
    os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        overwrite = isinstance(payload, LiveImagePayload)
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    user_config = TaskQueue("User creation", N_("Creating users"))
    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(USERS, users_dbus_tasks)
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    if isinstance(payload, LiveImagePayload):
        btrfs_task = bootloader_proxy.FixBTRFSWithTask(
            payload.kernel_version_list)
        generate_initramfs.append_dbus_tasks(STORAGE, [btrfs_task])

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    zipl_task = bootloader_proxy.FixZIPLWithTask()
    generate_initramfs.append_dbus_tasks(STORAGE, [zipl_task])
    configuration_queue.append(generate_initramfs)

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
Example #8
def _prepare_installation(payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (None, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    storage_proxy = STORAGE.get_proxy()
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))
    early_storage.append_dbus_tasks(STORAGE, storage_proxy.InstallWithTasks())

    if payload.needs_storage_configuration:
        conf_task = storage_proxy.WriteConfigurationWithTask()
        early_storage.append_dbus_tasks(STORAGE, [conf_task])

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    timezone_proxy = TIMEZONE.get_proxy()
    ntp_excluded = timezone.NTP_PACKAGE in ksdata.packages.excludedList
    pre_install.append_dbus_tasks(
        TIMEZONE,
        [timezone_proxy.ConfigureNTPServiceEnablementWithTask(ntp_excluded)])

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")

        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)

        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [
            SECURITY, NETWORK, TIMEZONE, STORAGE
        ]
        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)

        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        conf_task = storage_proxy.WriteConfigurationWithTask()
        late_storage.append_dbus_tasks(STORAGE, [conf_task])
        installation_queue.append(late_storage)

    # Do bootloader.
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_install = TaskQueue("Bootloader installation",
                                   N_("Installing boot loader"))

    def configure_bootloader():
        boot_task = bootloader_proxy.ConfigureWithTask(
            payload.kernel_version_list)
        sync_run_task(STORAGE.get_proxy(boot_task))

    if not payload.handles_bootloader_configuration:
        # FIXME: This is a temporary workaround, run the DBus task directly.
        bootloader_install.append(
            Task("Configure the bootloader", configure_bootloader))

    bootloader_install.append_dbus_tasks(STORAGE,
                                         [bootloader_proxy.InstallWithTask()])
    installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_task = snapshot_proxy.CreateWithTask(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
        installation_queue.append(snapshot_creation)

    return installation_queue
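Several examples add WriteResolvConfTask with the note that the sysroot path must be resolved when the task starts, not when the queue is built. A rough sketch of such a Task subclass, assuming the name of the run hook and using util.getSysroot() as seen in the doConfiguration example below:

import shutil

class WriteResolvConfTask(Task):
    # Hypothetical sketch: look up the sysroot lazily, at run time, and copy
    # the installer's resolv.conf there so rpm scriptlets can resolve names.
    def run_task(self):  # the name of the run hook is an assumption
        sysroot = util.getSysroot()
        shutil.copyfile("/etc/resolv.conf", sysroot + "/etc/resolv.conf")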
Example #9
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))

    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in security_dbus_tasks:
        task_proxy = SECURITY.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))

    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in services_dbus_tasks:
        task_proxy = SERVICES.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))

    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))

    localization_proxy = LOCALIZATION.get_proxy()
    localization_dbus_tasks = localization_proxy.InstallWithTasks(
        util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in localization_dbus_tasks:
        task_proxy = LOCALIZATION.get_proxy(dbus_task)
        os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))

    firewall_proxy = NETWORK.get_proxy(FIREWALL)
    firewall_dbus_task = firewall_proxy.InstallWithTask(util.getSysroot())
    task_proxy = NETWORK.get_proxy(firewall_dbus_task)
    os_config.append(Task(task_proxy.Name, sync_run_task, (task_proxy, )))

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    user_config = TaskQueue("User creation", N_("Creating users"))

    users_proxy = USERS.get_proxy()
    users_dbus_tasks = users_proxy.InstallWithTasks(util.getSysroot())
    # add one Task instance per DBUS task
    for dbus_task in users_dbus_tasks:
        task_proxy = USERS.get_proxy(dbus_task)
        user_config.append(Task(task_proxy.Name, sync_run_task,
                                (task_proxy, )))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, None, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload,
                  LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))

    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue(
            "Realm join",
            N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
Example #10
def doConfiguration(storage, payload, ksdata, instClass):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration", N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure SELinux", ksdata.selinux.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure first boot tasks", ksdata.firstboot.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure services", ksdata.services.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure language", ksdata.lang.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure firewall", ksdata.firewall.execute, (storage, ksdata, instClass)))
    os_config.append(Task("Configure X", ksdata.xconfig.execute, (storage, ksdata, instClass)))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration", N_("Writing network configuration"))
        network_config.append(Task("Network configuration",
                                   ksdata.network.execute, (storage, payload, ksdata, instClass)))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(Task("Configure root", ksdata.rootpw.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure user groups", ksdata.group.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure user", ksdata.user.execute, (storage, ksdata, instClass, u)))
    user_config.append(Task("Configure SSH key", ksdata.sshkey.execute, (storage, ksdata, instClass, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration", N_("Configuring addons"))
    addon_config.append(Task("Configure Anaconda addons", ksdata.addons.execute, (storage, ksdata, instClass, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation", N_("Generating initramfs"))
    generate_initramfs.append(Task("Generate initramfs", payload.recreateInitrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(Task("Write BTRFS bootloader fix", writeBootLoader, (storage, payload, instClass, ksdata)))
    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue("Realm join", N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute, (storage, ksdata, instClass)))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts", N_("Running post-installation scripts"))
    post_scripts.append(Task("Run post installation scripts", runPostScripts, (ksdata.scripts,)))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts", N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning("Writing of the output kickstart to installed system has been disabled"
                    " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata,)))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generate image or directory contents.
    if conf.target.is_image:
        log.info("Not writing out user interaction config file due to image install mode.")
    elif conf.target.is_directory:
        log.info("Not writing out user interaction config file due to directory install mode.")
    else:
        write_configs.append(Task("Store user interaction config", screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                   x.name, next(task_completed_counter),
                                                                   x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
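The progress_* helpers shared by these examples form a small contract: progress_init(n) announces the number of steps, progress_message() updates the status text, progress_step() advances by one step, and progress_complete() finishes the bar. A standalone sketch of that assumed contract:

_total_steps = 0
_current_step = 0

def progress_init(total):
    """Reset the counter; the UI should expect that many steps (sketch)."""
    global _total_steps, _current_step
    _total_steps, _current_step = total, 0

def progress_step(message):
    """Advance by one step and report the message (sketch)."""
    global _current_step
    _current_step += 1
    print("[%d/%d] %s" % (_current_step, _total_steps, message))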
Example #11
    def test_task_queue_processing(self):
        """Check that task queue processing works correctly."""
        # callback counting functions
        def task_started_cb(*args):
            self._task_started_count += 1

        def task_completed_cb(*args):
            self._task_completed_count += 1

        def queue_started_cb(*args):
            self._queue_started_count += 1

        def queue_completed_cb(*args):
            self._queue_completed_count += 1

        # verify initial content of callback counters
        assert self._task_started_count == 0
        assert self._task_completed_count == 0
        assert self._queue_started_count == 0
        assert self._queue_completed_count == 0

        # create the group 1
        group1 = TaskQueue(name="group1", status_message="processing group1")
        task1 = Task("increment var 1", self._increment_var1)
        group1.append(task1)

        # create the group 2
        group2 = TaskQueue(name="group2", status_message="processing group2")
        task2a = Task("increment var 2", self._increment_var2)
        group2.append(task2a)

        task2b = Task("increment var 2", self._increment_var2)
        group2.append(task2b)

        # create the group 3
        group3 = TaskQueue(name="group3", status_message="processing group3 (empty)")

        # create the top level queue
        queue1 = TaskQueue(name="queue1")
        queue1.task_started.connect(task_started_cb)
        queue1.task_completed.connect(task_completed_cb)
        queue1.queue_started.connect(queue_started_cb)
        queue1.queue_completed.connect(queue_completed_cb)

        # add the nested queues
        queue1.append(group1)
        queue1.append(group2)
        queue1.append(group3)  # an empty group should be also processed correctly

        # and one top-level task
        task4 = Task("increment var 1", self._increment_var1)
        queue1.append(task4)

        # check that the groups have been added correctly
        assert queue1.items == [
            group1,
            group2,
            group3,
            task4,
        ]
        assert queue1.nested_items == [
            group1,
            task1,
            group2,
            task2a,
            task2b,
            group3,
            task4,
        ]
        assert queue1.queue_count == 3
        assert queue1.task_count == 4
        assert queue1.summary == dedent("""
        Top-level task queue: queue1
        Number of task queues: 3
        Number of tasks: 4
        Task & task group listing:
         Task queue: group1
          Task: increment var 1
         Task queue: group2
          Task: increment var 2
          Task: increment var 2
         Task queue: group3
         Task: increment var 1
        """).strip()

        # start the queue
        queue1.start()

        # check if the tasks were correctly executed
        assert self._test_variable1 == 2
        assert self._test_variable2 == 2

        # check that the task & queue signals were triggered correctly
        assert self._task_started_count == 4
        assert self._task_completed_count == 4
        assert self._queue_started_count == 3
        assert self._queue_completed_count == 3
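The nested_items assertion above implies a flattened, in-order view of the queue: every direct item is listed, a nested TaskQueue is immediately followed by its own contents, and the top-level queue itself is excluded. A recursive sketch of such a property under that assumption:

def nested_items(queue):
    # Flatten the queue: list each item and, when the item is itself a
    # TaskQueue, expand its contents right after it (sketch based on the
    # assertions in the test above).
    result = []
    for item in queue.items:
        result.append(item)
        if isinstance(item, TaskQueue):
            result.extend(nested_items(item))
    return result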
Example #12
def doInstall(storage, payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running, wait for the others to finish if not.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_treads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # lets just do this as a top-level task - no
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata",
             task=update_storage_ksdata,
             task_args=(storage, ksdata)))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        resize_format_pre=message_clbk,
        wait_for_entropy=entropy_wait_clbk)
    if not conf.target.is_directory:
        early_storage.append(
            Task("Activate filesystems",
                 task=turn_on_filesystems,
                 task_args=(storage, ),
                 task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(
        Task("Mount filesystems", task=storage.mount_filesystems))

    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(
            Task("Write early storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    realm_discover = TaskQueue("Realm discover",
                               N_("Discovering realm to join"))
    realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
    installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authselect", ksdata.authselect.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.realm.packages,
                                          reason="realm")
        payload.requirements.add_packages(ksdata.authselect.packages,
                                          reason="authselect")
        payload.requirements.add_packages(ksdata.firewall.packages,
                                          reason="firewall")
        payload.requirements.add_packages(ksdata.network.packages,
                                          reason="network")
        payload.requirements.add_packages(ksdata.timezone.packages,
                                          reason="ntp",
                                          strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages,
                                              reason="bootloader")
        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)
        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        late_storage.append(
            Task("Prepare mount targets",
                 task=payload.prepare_mount_targets,
                 task_args=(storage, )))

        if not conf.target.is_directory:
            late_storage.append(
                Task("Write late storage",
                     task=write_storage_configuration,
                     task_args=(storage, )))

        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", write_boot_loader, (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests,
                                           SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(
            Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(installation_queue.task_count)
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(installation_queue.queue_count)
    task_started_counter = util.item_counter(installation_queue.task_count)
    task_completed_counter = util.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
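
The counters passed to the logging callbacks above are plain generators. A minimal sketch of what a helper like util.item_counter could look like (illustrative only, not the project's actual implementation):

def item_counter(total):
    """Yield progress strings "1/<total>", "2/<total>", ... one per call to next()."""
    if total < 0:
        raise ValueError("total must not be negative")
    position = 0
    while position < total:
        position += 1
        yield "%d/%d" % (position, total)

Each next(queue_counter) / next(task_started_counter) call in the connected lambdas then produces the next "n/total" string for the log message.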
Example #13
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    willInstallBootloader = not flags.flags.dirInstall and (
        not ksdata.bootloader.disabled and ksdata.bootloader != "none")

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.queue_completed.connect(
        lambda x: progress_step("%s -- DONE" % x.status_message))

    # This should be the only thread running; if not, wait for the others to finish.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if flags.can_touch_runtime_system("save system time to HW clock"):
        # let's just do this as a top-level task - no need for a separate queue
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock, (ksdata.timezone, ))
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup firstboot", ksdata.firstboot.setup, (ksdata, instClass)))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup,
             (storage, ksdata, instClass, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata", storage.update_ksdata))

    # pre-storage tasks
    # - Is this actually needed? It does not appear to do anything right now.
    early_storage.append(Task("Run pre-storage tasks", payload.preStorage))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    step_clbk = lambda clbk_data: progress_step(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        create_format_post=step_clbk,
        resize_format_pre=message_clbk,
        resize_format_post=step_clbk,
        wait_for_entropy=entropy_wait_clbk)

    early_storage.append(
        Task("Activate filesystems",
             task=turn_on_filesystems,
             task_args=(storage, ),
             task_kwargs={
                 "mount_only": flags.flags.dirInstall,
                 "callbacks": callbacks_reg
             }))

    early_storage.append(Task("Write early storage",
                              payload.writeStorageEarly))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    if ksdata.realm.join_realm:
        realm_discover = TaskQueue("Realm discover",
                                   N_("Discovering realm to join"))
        realm_discover.append(
            Task("Discover realm to join", ksdata.realm.setup))
        installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authconfig", ksdata.authconfig.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if flags.can_touch_runtime_system("copy /etc/resolv.conf to sysroot"):
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy /resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.preInstall()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        packages = storage.packages + ksdata.realm.packages
        packages += ksdata.authconfig.packages + ksdata.firewall.packages + ksdata.network.packages

        if willInstallBootloader:
            packages += storage.bootloader.packages

        # don't try to install packages from the install class' ignored list and the
        # explicitly excluded ones (user takes the responsibility)
        packages = [
            p for p in packages if p not in instClass.ignoredPackages
            and p not in ksdata.packages.excludedList
        ]
        payload.preInstall(packages=packages, groups=payload.languageGroups())

    pre_install.append(
        Task("Find additional packages & run preInstall()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    late_storage = TaskQueue("Late storage configuration",
                             N_("Configuring storage"))
    late_storage.append(Task("Write late storage", payload.writeStorageLate))
    installation_queue.append(late_storage)

    # Do bootloader.
    if willInstallBootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", writeBootLoader,
                 (storage, payload, instClass, ksdata)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.postInstall))
    installation_queue.append(post_install)

    # Create snapshot
    if ksdata.snapshot and ksdata.snapshot.has_snapshot(
            SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_creation.append(
            Task("Create post-install snapshots", ksdata.snapshot.execute,
                 (storage, ksdata, instClass)))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(len(installation_queue))
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = iutil.item_counter(installation_queue.queue_count)
    task_started_counter = iutil.item_counter(installation_queue.task_count)
    task_completed_counter = iutil.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
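
Both variants above append a WriteResolvConfTask and note that the sysroot path must be resolved when the task runs, not when the queue is built. A self-contained, hypothetical stand-in that shows only that idea of deferred path resolution (the class name and copy target are illustrative, not the project's real class):

import shutil


class DeferredResolvConfCopy:
    """Illustrative task-like object: the destination root is resolved in run(),
    i.e. at execution time, so it reflects the sysroot as it exists by then."""

    def __init__(self, name, get_sysroot):
        self.name = name
        self._get_sysroot = get_sysroot  # callable, evaluated lazily in run()

    def run(self):
        destination = self._get_sysroot() + "/etc/resolv.conf"
        shutil.copyfile("/etc/resolv.conf", destination)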
Example #14
    def _prepare_installation(self, payload, ksdata):
        """Perform an installation.  This method takes the ksdata as prepared by
           the UI (the first hub, in graphical mode) and applies it to the disk.
           The two main tasks for this are putting filesystems onto disks and
           installing packages onto those filesystems.
        """
        installation_queue = TaskQueue("Installation queue")
        # connect progress reporting
        installation_queue.queue_started.connect(
            lambda x: progress_message(x.status_message))
        installation_queue.task_completed.connect(
            lambda x: progress_step(x.name))

        # setup the installation environment
        setup_environment = TaskQueue(
            "Installation environment setup",
            _("Setting up the installation environment"))

        boss_proxy = BOSS.get_proxy()
        for service_name, object_path in boss_proxy.CollectConfigureRuntimeTasks(
        ):
            task_proxy = DBus.get_proxy(service_name, object_path)
            setup_environment.append(DBusTask(task_proxy))

        # Add configuration tasks for the Localization DBus module.
        if is_module_available(LOCALIZATION):
            localization_proxy = LOCALIZATION.get_proxy()
            # Populate the missing keyboard values before the payload installation,
            # so the module requirements can be generated for the right configuration.
            # FIXME: Make sure that the module always returns right values.
            populate_task = localization_proxy.PopulateMissingKeyboardConfigurationWithTask(
            )
            setup_environment.append_dbus_tasks(LOCALIZATION, [populate_task])

        installation_queue.append(setup_environment)

        # Do partitioning.
        # Depending on current payload the storage might be apparently configured
        # either before or after package/payload installation.
        # So let's have two task queues - early storage & late storage.
        storage_proxy = STORAGE.get_proxy()
        early_storage = TaskQueue("Early storage configuration",
                                  _("Configuring storage"))
        early_storage.append_dbus_tasks(STORAGE,
                                        storage_proxy.InstallWithTasks())

        if payload.type == PAYLOAD_TYPE_DNF:
            conf_task = storage_proxy.WriteConfigurationWithTask()
            early_storage.append_dbus_tasks(STORAGE, [conf_task])

        installation_queue.append(early_storage)

        # Run %pre-install scripts with the filesystem mounted and no packages
        pre_install_scripts = TaskQueue("Pre-install scripts",
                                        _("Running pre-installation scripts"))
        pre_install_scripts.append(
            Task("Run %pre-install scripts", runPreInstallScripts,
                 (ksdata.scripts, )))
        installation_queue.append(pre_install_scripts)

        # Do various pre-installation tasks
        # - try to discover a realm (if any)
        # - check for possibly needed additional packages.
        pre_install = TaskQueue("Pre install tasks",
                                _("Running pre-installation tasks"))

        if is_module_available(SECURITY):
            security_proxy = SECURITY.get_proxy()

            # Discover a realm.
            pre_install.append_dbus_tasks(
                SECURITY, [security_proxy.DiscoverRealmWithTask()])

            # Set up FIPS for the payload installation.
            fips_task = security_proxy.PreconfigureFIPSWithTask(payload.type)
            pre_install.append_dbus_tasks(SECURITY, [fips_task])

        # Install the payload.
        pre_install.append(
            Task("Find additional packages & run pre_install()",
                 payload.pre_install))
        installation_queue.append(pre_install)

        payload_install = TaskQueue("Payload installation", _("Installing."))
        payload_install.append(Task("Install the payload", payload.install))
        installation_queue.append(payload_install)

        # for some payloads storage is configured after the payload is installed
        if payload.type != PAYLOAD_TYPE_DNF:
            late_storage = TaskQueue("Late storage configuration",
                                     _("Configuring storage"))
            conf_task = storage_proxy.WriteConfigurationWithTask()
            late_storage.append_dbus_tasks(STORAGE, [conf_task])
            installation_queue.append(late_storage)

        # Do bootloader.
        bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
        bootloader_install = TaskQueue("Bootloader installation",
                                       _("Installing boot loader"))

        def run_configure_bootloader():
            tasks = boss_proxy.CollectConfigureBootloaderTasks(
                payload.kernel_version_list)

            for service, task in tasks:
                sync_run_task(DBus.get_proxy(service, task))

        bootloader_install.append(
            Task("Configure bootloader", run_configure_bootloader))

        def run_install_bootloader():
            tasks = bootloader_proxy.InstallBootloaderWithTasks(
                payload.type, payload.kernel_version_list)

            for task in tasks:
                sync_run_task(STORAGE.get_proxy(task))

        bootloader_install.append(
            Task("Install bootloader", run_install_bootloader))
        installation_queue.append(bootloader_install)

        post_install = TaskQueue("Post-installation setup tasks",
                                 _("Performing post-installation setup tasks"))
        post_install.append(
            Task("Run post-installation setup tasks", payload.post_install))
        installation_queue.append(post_install)

        # Create snapshot
        snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

        if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
            snapshot_creation = TaskQueue(
                "Creating post installation snapshots",
                _("Creating snapshots"))
            snapshot_task = snapshot_proxy.CreateWithTask(
                SNAPSHOT_WHEN_POST_INSTALL)
            snapshot_creation.append_dbus_tasks(STORAGE, [snapshot_task])
            installation_queue.append(snapshot_creation)

        return installation_queue
Example #15
    def test_task_queue_processing(self):
        """Check that task queue processing works correctly."""

        # callback counting functions
        def task_started_cb(*args):
            self._task_started_count += 1

        def task_completed_cb(*args):
            self._task_completed_count += 1

        def queue_started_cb(*args):
            self._queue_started_count += 1

        def queue_completed_cb(*args):
            self._queue_completed_count += 1

        # verify initial content of callback counters
        assert self._task_started_count == 0
        assert self._task_completed_count == 0
        assert self._queue_started_count == 0
        assert self._queue_completed_count == 0
        # create some groups
        group1 = TaskQueue(name="group1", status_message="processing group1")
        group1.append(Task("increment var 1", self._increment_var1))
        group2 = TaskQueue(name="group2", status_message="processing group2")
        group2.append(Task("increment var 2", self._increment_var2))
        group2.append(Task("increment var 2", self._increment_var2))
        group3 = TaskQueue(name="group3",
                           status_message="processing group3 (empty)")
        # create the top level queue
        queue1 = TaskQueue(name="queue1")
        # connect to its top-level callbacks
        queue1.task_started.connect(task_started_cb)
        queue1.task_completed.connect(task_completed_cb)
        queue1.queue_started.connect(queue_started_cb)
        queue1.queue_completed.connect(queue_completed_cb)
        # add the nested queues
        queue1.append(group1)
        queue1.append(group2)
        queue1.append(group3)  # an empty group should also be processed correctly
        # and one top-level task
        queue1.append(Task("increment var 1", self._increment_var1))
        # check that the groups have been added correctly
        assert len(queue1) == 4
        assert queue1[0].name == "group1"
        assert len(queue1[0]) == 1
        assert queue1[1].name == "group2"
        assert len(queue1[1]) == 2
        assert queue1[2].name == "group3"
        assert len(queue1[2]) == 0
        assert queue1.queue_count == 3
        assert queue1.task_count == 4
        # summary is generated recursively
        assert bool(queue1.summary)
        # start the queue
        queue1.start()
        # check if the tasks were correctly executed
        assert self._test_variable1 == 2
        assert self._test_variable2 == 2
        assert self._test_variable3 == 0
        # check that the task & queue signals were triggered correctly
        assert self._task_started_count == 4
        assert self._task_completed_count == 4
        assert self._queue_started_count == 3
        assert self._queue_completed_count == 3
        # check queue state after execution
        assert not queue1.running
        assert queue1.done
        assert queue1.current_task_number is None
        assert queue1.current_queue_number is None
        # create another queue and add some task groups and tasks to it
        group4 = TaskQueue(name="group 4", status_message="processing group4")
        group4.append(Task("increment var 1", self._increment_var1))
        group5 = TaskQueue(name="group 5", status_message="processing group5")
        group5.append(Task("increment var 3", self._increment_var3))
        queue2 = TaskQueue(name="queue2")
        queue2.append(group4)
        queue2.append(group5)
        # start the second queue
        queue2.start()
        # check the tasks also properly executed
        assert self._test_variable1 == 3
        assert self._test_variable2 == 2
        assert self._test_variable3 == 1
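
The assertions above rely on task_count and queue_count being computed recursively over nested queues. A rough, self-contained sketch of how such counters could be derived for a list-like queue (illustrative only; the real properties live in the TaskQueue class):

class MiniQueue(list):
    """Illustrative list-based queue with recursive task/queue counters."""

    @property
    def task_count(self):
        # tasks in nested queues are counted, the queues themselves are not
        return sum(item.task_count if isinstance(item, MiniQueue) else 1
                   for item in self)

    @property
    def queue_count(self):
        # count nested queues at any depth
        return sum((1 + item.queue_count) if isinstance(item, MiniQueue) else 0
                   for item in self)

With the structure built in the test (three nested groups holding 1, 2 and 0 tasks plus one top-level task) this yields task_count == 4 and queue_count == 3, matching the assertions.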
Example #16
def _prepare_configuration(payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # add installation tasks for the Subscription DBus module
    if is_module_available(SUBSCRIPTION):
        # we only run the tasks if the Subscription module is available
        subscription_config = TaskQueue("Subscription configuration",
                                        N_("Configuring Red Hat subscription"))
        subscription_proxy = SUBSCRIPTION.get_proxy()
        subscription_dbus_tasks = subscription_proxy.InstallWithTasks()
        subscription_config.append_dbus_tasks(SUBSCRIPTION,
                                              subscription_dbus_tasks)
        configuration_queue.append(subscription_config)

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))

    # add installation tasks for the Security DBus module
    security_proxy = SECURITY.get_proxy()
    security_dbus_tasks = security_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SECURITY, security_dbus_tasks)

    # add installation tasks for the Timezone DBus module
    # run these tasks before tasks of the Services module
    if is_module_available(TIMEZONE):
        timezone_proxy = TIMEZONE.get_proxy()
        timezone_dbus_tasks = timezone_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(TIMEZONE, timezone_dbus_tasks)

    # add installation tasks for the Services DBus module
    services_proxy = SERVICES.get_proxy()
    services_dbus_tasks = services_proxy.InstallWithTasks()
    os_config.append_dbus_tasks(SERVICES, services_dbus_tasks)

    # add installation tasks for the Localization DBus module
    if is_module_available(LOCALIZATION):
        localization_proxy = LOCALIZATION.get_proxy()
        localization_dbus_tasks = localization_proxy.InstallWithTasks()
        os_config.append_dbus_tasks(LOCALIZATION, localization_dbus_tasks)

    # add the Firewall configuration task
    if conf.target.can_configure_network:
        firewall_proxy = NETWORK.get_proxy(FIREWALL)
        firewall_dbus_task = firewall_proxy.InstallWithTask()
        os_config.append_dbus_tasks(NETWORK, [firewall_dbus_task])

    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.target.can_configure_network and conf.system.provides_network_config:
        overwrite = payload.type in PAYLOAD_LIVE_TYPES
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", network.write_configuration,
                 (overwrite, )))
        configuration_queue.append(network_config)

    # add installation tasks for the Users DBus module
    if is_module_available(USERS):
        user_config = TaskQueue("User creation", N_("Creating users"))
        users_proxy = USERS.get_proxy()
        users_dbus_tasks = users_proxy.InstallWithTasks()
        user_config.append_dbus_tasks(USERS, users_dbus_tasks)
        configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))

    # there is no longer a User class & addons should no longer need it
    # FIXME: drop user class parameter from the API & all known addons
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (None, ksdata, None, payload)))

    boss_proxy = BOSS.get_proxy()
    addon_config.append_dbus_tasks(BOSS, [boss_proxy.InstallSystemWithTask()])

    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)

    def run_generate_initramfs():
        tasks = bootloader_proxy.GenerateInitramfsWithTasks(
            payload.type, payload.kernel_version_list)

        for task in tasks:
            sync_run_task(STORAGE.get_proxy(task))

    generate_initramfs.append(
        Task("Generate initramfs", run_generate_initramfs))
    configuration_queue.append(generate_initramfs)

    # Configure FIPS.
    configuration_queue.append_dbus_tasks(
        SECURITY, [security_proxy.ConfigureFIPSWithTask()])

    # realm join
    # - this can run only after network is configured in the target system chroot
    configuration_queue.append_dbus_tasks(SECURITY,
                                          [security_proxy.JoinRealmWithTask()])

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    return configuration_queue
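
The `if write_configs.task_count:` check above keeps an empty queue out of the configuration queue, so no progress message or step is emitted for a queue with nothing to do. The same guard could be expressed as a small helper (hypothetical, not part of the project API):

def append_if_nonempty(parent_queue, child_queue):
    """Append child_queue to parent_queue only when it actually contains tasks."""
    if child_queue.task_count:
        parent_queue.append(child_queue)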
Example #17
def _prepare_installation(storage, payload, ksdata):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running; if not, wait for the others to finish.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n
                            for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue(
            "Wait for threads to finish",
            N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(
            Task("Wait for all threads to finish", wait_for_all_treads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task - no need for a separate queue
        save_hwclock = Task("Save system time to HW clock",
                            timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue(
        "Installation environment setup",
        N_("Setting up the installation environment"))
    setup_environment.append(
        Task("Setup addons", ksdata.addons.setup, (storage, ksdata, payload)))

    boss_proxy = BOSS.get_proxy()
    setup_environment.append_dbus_tasks(
        BOSS, [boss_proxy.ConfigureRuntimeWithTask()])

    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration",
                              N_("Configuring storage"))

    # put custom storage info into ksdata
    early_storage.append(
        Task("Insert custom storage to ksdata",
             task=update_storage_ksdata,
             task_args=(storage, ksdata)))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(
        clbk_data.msg, clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(
        create_format_pre=message_clbk,
        resize_format_pre=message_clbk,
        wait_for_entropy=entropy_wait_clbk)
    if not conf.target.is_directory:
        early_storage.append(
            Task("Activate filesystems",
                 task=turn_on_filesystems,
                 task_args=(storage, ),
                 task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(
        Task("Mount filesystems", task=storage.mount_filesystems))

    if payload.needs_storage_configuration and not conf.target.is_directory:
        early_storage.append(
            Task("Write early storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))

    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts",
                                    N_("Running pre-installation scripts"))
    pre_install_scripts.append(
        Task("Run %pre-install scripts", runPreInstallScripts,
             (ksdata.scripts, )))
    installation_queue.append(pre_install_scripts)

    # Do various pre-installation tasks
    # - try to discover a realm (if any)
    # - check for possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks",
                            N_("Running pre-installation tasks"))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(
        Task("Setup timezone", ksdata.timezone.setup, (ksdata, )))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy resolv.conf to sysroot"))

    # realm discovery
    security_proxy = SECURITY.get_proxy()
    pre_install.append_dbus_tasks(SECURITY,
                                  [security_proxy.DiscoverRealmWithTask()])

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.pre_install()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.timezone.packages,
                                          reason="ntp",
                                          strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages,
                                              reason="bootloader")
        if kernel_arguments.is_enabled("fips"):
            payload.requirements.add_packages(['/usr/bin/fips-mode-setup'],
                                              reason="compliance")

        payload.requirements.add_groups(payload.language_groups(),
                                        reason="language groups")
        payload.requirements.add_packages(payload.langpacks(),
                                          reason="langpacks",
                                          strong=False)

        # add package requirements from modules
        # - iterate over all modules we know have valid package requirements
        # - add any requirements found to the payload requirement tracking
        modules_with_package_requirements = [SECURITY, NETWORK]
        for module in modules_with_package_requirements:
            module_proxy = module.get_proxy()
            module_requirements = Requirement.from_structure_list(
                module_proxy.CollectRequirements())
            log.debug("Adding requirements for module %s : %s", module,
                      module_requirements)
            payload.requirements.add_requirements(module_requirements)

        payload.pre_install()

    pre_install.append(
        Task("Find additional packages & run pre_install()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    if not payload.needs_storage_configuration and not conf.target.is_directory:
        late_storage = TaskQueue("Late storage configuration",
                                 N_("Configuring storage"))
        late_storage.append(
            Task("Write late storage",
                 task=write_storage_configuration,
                 task_args=(storage, )))

        installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation",
                                       N_("Installing boot loader"))
        bootloader_install.append(
            Task("Install bootloader", write_boot_loader, (storage, payload)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks",
                             (N_("Performing post-installation setup tasks")))
    post_install.append(
        Task("Run post-installation setup tasks", payload.post_install))
    installation_queue.append(post_install)

    # Create snapshot
    snapshot_proxy = STORAGE.get_proxy(SNAPSHOT)

    if snapshot_proxy.IsRequested(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots",
                                      N_("Creating snapshots"))
        snapshot_requests = ksdata.snapshot.get_requests(
            SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_task = SnapshotCreateTask(storage, snapshot_requests,
                                           SNAPSHOT_WHEN_POST_INSTALL)
        snapshot_creation.append(
            Task("Create post-install snapshots", snapshot_task.run))
        installation_queue.append(snapshot_creation)

    return installation_queue
Example #18
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(
        lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration",
                          N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))
    os_config.append(Task("Configure SELinux", ksdata.selinux.execute))
    os_config.append(
        Task("Configure first boot tasks", ksdata.firstboot.execute))
    os_config.append(Task("Configure services", ksdata.services.execute))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))
    os_config.append(Task("Configure language", ksdata.lang.execute))
    os_config.append(Task("Configure firewall", ksdata.firewall.execute))
    os_config.append(Task("Configure X", ksdata.xconfig.execute))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration",
                                   N_("Writing network configuration"))
        network_config.append(
            Task("Network configuration", ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(
        Task("Configure root", ksdata.rootpw.execute, (storage, ksdata, u)))
    user_config.append(
        Task("Configure user groups", ksdata.group.execute,
             (storage, ksdata, u)))
    user_config.append(
        Task("Configure user", ksdata.user.execute, (storage, ksdata, u)))
    user_config.append(
        Task("Configure SSH key", ksdata.sshkey.execute, (storage, ksdata, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration",
                             N_("Configuring addons"))
    addon_config.append(
        Task("Configure Anaconda addons", ksdata.addons.execute,
             (storage, ksdata, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation",
                                   N_("Generating initramfs"))
    generate_initramfs.append(
        Task("Generate initramfs", payload.recreate_initrds))

    # This works around two problems: /boot on BTRFS, and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload,
                  LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(
            Task("Write BTRFS bootloader fix", write_boot_loader,
                 (storage, payload)))

    # Invoking zipl should be the last thing done on a s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(
            Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))

    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue(
            "Realm join",
            N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts",
                             N_("Running post-installation scripts"))
    post_scripts.append(
        Task("Run post installation scripts", runPostScripts,
             (ksdata.scripts, )))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts",
                              N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning(
            "Writing of the output kickstart to installed system has been disabled"
            " by the nosave option.")
    else:
        # write anaconda related configs & kickstarts
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata, )))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generated image or directory contents.
    if conf.target.is_image:
        log.info(
            "Not writing out user interaction config file due to image install mode."
        )
    elif conf.target.is_directory:
        log.info(
            "Not writing out user interaction config file due to directory install mode."
        )
    else:
        write_configs.append(
            Task("Store user interaction config",
                 screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info(
        "Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info(
        "Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(
        lambda x: log.debug("Task completed: %s (%s) (%1.1f s)", x.name,
                            next(task_completed_counter), x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
Example #19
def doInstall(storage, payload, ksdata, instClass):
    """Perform an installation.  This method takes the ksdata as prepared by
       the UI (the first hub, in graphical mode) and applies it to the disk.
       The two main tasks for this are putting filesystems onto disks and
       installing packages onto those filesystems.
    """
    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED
    can_install_bootloader = not conf.target.is_directory and bootloader_enabled

    installation_queue = TaskQueue("Installation queue")
    # connect progress reporting
    installation_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    installation_queue.task_completed.connect(lambda x: progress_step(x.name))

    # This should be the only thread running; if not, wait for the others to finish.
    if threadMgr.running > 1:
        # it could be that the threads finish execution before the task is executed,
        # but that should not cause any issues

        def wait_for_all_threads():
            for message in ("Thread %s is running" % n for n in threadMgr.names):
                log.debug(message)
            threadMgr.wait_all()

        # Use a queue with a single task as only TaskQueues have the status_message
        # property used for setting the progress status in the UI.
        wait_for_threads = TaskQueue("Wait for threads to finish",
                                     N_("Waiting for %s threads to finish") % (threadMgr.running - 1))

        wait_for_threads.append(Task("Wait for all threads to finish", wait_for_all_threads))
        installation_queue.append(wait_for_threads)

    # Save system time to HW clock.
    # - this used to be before waiting on threads, but I don't think that's needed
    if conf.system.can_set_hardware_clock:
        # let's just do this as a top-level task - no need for a separate queue
        save_hwclock = Task("Save system time to HW clock", timezone.save_hw_clock)
        installation_queue.append(save_hwclock)

    # setup the installation environment
    setup_environment = TaskQueue("Installation environment setup", N_("Setting up the installation environment"))
    setup_environment.append(Task("Setup addons", ksdata.addons.setup, (storage, ksdata, instClass, payload)))
    installation_queue.append(setup_environment)

    # Do partitioning.
    # Depending on current payload the storage might be apparently configured
    # either before or after package/payload installation.
    # So let's have two task queues - early storage & late storage.
    early_storage = TaskQueue("Early storage configuration", N_("Configuring storage"))

    # put custom storage info into ksdata, but not if just assigning mount points
    manual_part_proxy = STORAGE.get_proxy(MANUAL_PARTITIONING)

    if not manual_part_proxy.Enabled:
        early_storage.append(Task("Insert custom storage to ksdata", storage.update_ksdata))

    # pre-storage tasks
    # - Is this actually needed? It does not appear to do anything right now.
    early_storage.append(Task("Run pre-storage tasks", payload.preStorage))

    # callbacks for blivet
    message_clbk = lambda clbk_data: progress_message(clbk_data.msg)
    entropy_wait_clbk = lambda clbk_data: wait_for_entropy(clbk_data.msg,
                                                           clbk_data.min_entropy, ksdata)
    callbacks_reg = callbacks.create_new_callbacks_register(create_format_pre=message_clbk,
                                                            resize_format_pre=message_clbk,
                                                            wait_for_entropy=entropy_wait_clbk)
    if conf.target.is_directory:
        early_storage.append(Task("Mount filesystems",
                                  task=storage.mount_filesystems))
    else:
        early_storage.append(Task("Activate filesystems",
                                  task=turn_on_filesystems,
                                  task_args=(storage,),
                                  task_kwargs={"callbacks": callbacks_reg}))

    early_storage.append(Task("Write early storage", payload.writeStorageEarly))
    installation_queue.append(early_storage)

    # Run %pre-install scripts with the filesystem mounted and no packages
    pre_install_scripts = TaskQueue("Pre-install scripts", N_("Running pre-installation scripts"))
    pre_install_scripts.append(Task("Run %pre-install scripts", runPreInstallScripts, (ksdata.scripts,)))
    installation_queue.append(pre_install_scripts)

    # Do packaging.

    # Discover information about realms to join to determine the need for additional packages.
    realm_discover = TaskQueue("Realm discover", N_("Discovering realm to join"))
    realm_discover.append(Task("Discover realm to join", ksdata.realm.setup))
    installation_queue.append(realm_discover)

    # Check for other possibly needed additional packages.
    pre_install = TaskQueue("Pre install tasks", N_("Running pre-installation tasks"))
    pre_install.append(Task("Setup authselect", ksdata.authselect.setup))
    pre_install.append(Task("Setup firewall", ksdata.firewall.setup))
    pre_install.append(Task("Setup network", ksdata.network.setup))
    # Setup timezone and add chrony as package if timezone was set in KS
    # and "-chrony" wasn't in packages section and/or --nontp wasn't set.
    pre_install.append(Task("Setup timezone", ksdata.timezone.setup, (ksdata,)))

    # make name resolution work for rpm scripts in chroot
    if conf.system.provides_resolver_config:
        # we use a custom Task subclass as the sysroot path has to be resolved
        # only when the task is actually started, not at task creation time
        pre_install.append(WriteResolvConfTask("Copy /resolv.conf to sysroot"))

    def run_pre_install():
        """This means to gather what additional packages (if any) are needed & executing payload.preInstall()."""
        # anaconda requires storage packages in order to make sure the target
        # system is bootable and configurable, and some other packages in order
        # to finish setting up the system.
        payload.requirements.add_packages(storage.packages, reason="storage")
        payload.requirements.add_packages(ksdata.realm.packages, reason="realm")
        payload.requirements.add_packages(ksdata.authselect.packages, reason="authselect")
        payload.requirements.add_packages(ksdata.firewall.packages, reason="firewall")
        payload.requirements.add_packages(ksdata.network.packages, reason="network")
        payload.requirements.add_packages(ksdata.timezone.packages, reason="ntp", strong=False)

        if can_install_bootloader:
            payload.requirements.add_packages(storage.bootloader.packages, reason="bootloader")
        payload.requirements.add_groups(payload.languageGroups(), reason="language groups")
        payload.requirements.add_packages(payload.langpacks(), reason="langpacks", strong=False)
        payload.preInstall()

    pre_install.append(Task("Find additional packages & run preInstall()", run_pre_install))
    installation_queue.append(pre_install)

    payload_install = TaskQueue("Payload installation", N_("Installing."))
    payload_install.append(Task("Install the payload", payload.install))
    installation_queue.append(payload_install)

    # for some payloads storage is configured after the payload is installed
    late_storage = TaskQueue("Late storage configuration", N_("Configuring storage"))
    late_storage.append(Task("Write late storage", payload.writeStorageLate))
    installation_queue.append(late_storage)

    # Do bootloader.
    if can_install_bootloader:
        bootloader_install = TaskQueue("Bootloader installation", N_("Installing boot loader"))
        bootloader_install.append(Task("Install bootloader", writeBootLoader, (storage, payload, instClass, ksdata)))
        installation_queue.append(bootloader_install)

    post_install = TaskQueue("Post-installation setup tasks", (N_("Performing post-installation setup tasks")))
    post_install.append(Task("Run post-installation setup tasks", payload.postInstall))
    installation_queue.append(post_install)

    # Create snapshot
    if ksdata.snapshot and ksdata.snapshot.has_snapshot(SNAPSHOT_WHEN_POST_INSTALL):
        snapshot_creation = TaskQueue("Creating post installation snapshots", N_("Creating snapshots"))
        snapshot_creation.append(Task("Create post-install snapshots", ksdata.snapshot.execute, (storage, ksdata, instClass)))
        installation_queue.append(snapshot_creation)

    # notify progress tracking about the number of steps
    progress_init(installation_queue.task_count)
    # log contents of the main task queue
    log.info(installation_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(installation_queue.queue_count)
    task_started_counter = util.item_counter(installation_queue.task_count)
    task_completed_counter = util.item_counter(installation_queue.task_count)
    installation_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    installation_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    installation_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                  x.name, next(task_completed_counter),
                                                                  x.elapsed_time))
    # start the task queue
    installation_queue.start()
    # done
    progress_complete()
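
The queue_started, task_started and task_completed hooks used throughout these examples all follow the same connect/emit pattern of a simple synchronous signal. A minimal illustrative stand-in (the project ships its own Signal implementation; this sketch only shows the behavior the examples rely on):

class MiniSignal:
    """Illustrative synchronous signal: connected callbacks run in order on emit()."""

    def __init__(self):
        self._callbacks = []

    def connect(self, callback):
        self._callbacks.append(callback)

    def emit(self, *args, **kwargs):
        for callback in self._callbacks:
            callback(*args, **kwargs)

A queue object built on such a signal would, for example, call self.task_completed.emit(task) after each task finishes, which is what would drive the progress_step and logging lambdas connected above.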