Example #1
    def run_automatically(storage, data, callback=None):
        """Run the DASD formatting automatically.

        This method could be run in a separate thread.
        """
        if not flags.automatedInstall:
            return

        if not DasdFormatting.is_supported():
            return

        disks = getDisks(storage.devicetree)

        formatting = DasdFormatting()
        formatting.update_restrictions(data)
        formatting.search_disks(disks)

        if not formatting.should_run():
            return

        if callback:
            formatting.report.connect(callback)

        formatting.run(storage, data)

        if callback:
            formatting.report.disconnect(callback)
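
The part of this example worth noting is the connect/run/disconnect dance around the optional callback: it is attached only for the duration of the run and detached again afterwards. The standalone sketch below imitates that pattern; the Signal class and every name in it are invented for illustration and are not the pyanaconda report API.

class Signal(object):
    """Tiny stand-in for a report signal with connect/disconnect/emit."""

    def __init__(self):
        self._callbacks = []

    def connect(self, callback):
        self._callbacks.append(callback)

    def disconnect(self, callback):
        self._callbacks.remove(callback)

    def emit(self, message):
        for callback in self._callbacks:
            callback(message)


def run_with_progress(work, callback=None):
    """Attach an optional progress callback only while the work runs."""
    report = Signal()

    if callback:
        report.connect(callback)

    work(report)

    if callback:
        report.disconnect(callback)


if __name__ == "__main__":
    # Prints the progress message through the temporarily attached callback.
    run_with_progress(lambda report: report.emit("Formatting /dev/dasda..."),
                      callback=print)
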
Example #2
File: storage.py Project: jresch/anaconda
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     # on the off-chance dasdfmt is running, we can't proceed further
     threadMgr.wait(constants.THREAD_DASDFMT)
     hubQ.send_message(self.__class__.__name__, _("Saving storage configuration..."))
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartValueError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         applyDiskSelection(self.storage, self.data, self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart:
             self.run()
     finally:
         resetCustomStorageData(self.data)
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
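
The control flow above repeats across almost every fork in this listing: the except branches log the failure and fall back to safe kickstart data, the else branch runs follow-up checks only when configuration succeeded, and finally always marks the spoke ready again. Below is a minimal standalone sketch of that try/except/else/finally shape; the callables and exception types are placeholders, not anaconda APIs.

import logging

log = logging.getLogger(__name__)


def do_execute(configure, check, rollback):
    """Sketch of the _doExecute control flow with placeholder callables."""
    ready = False
    errors = []
    try:
        configure()                              # stands in for doKickstartStorage()
    except (ValueError, RuntimeError) as e:      # stands in for StorageError etc.
        log.error("configuration failed: %s", e)
        errors = str(e).split("\n")
        rollback()                               # restore the user's previous selection
    else:
        check()                                  # only verify a configuration that applied
    finally:
        ready = True                             # the spoke becomes usable again either way
    return ready, errors


if __name__ == "__main__":
    print(do_execute(lambda: None, lambda: None, lambda: None))   # (True, [])
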
Example #3
    def __init__(self, data, storage, payload, instclass):
        NormalTUISpoke.__init__(self, data, storage, payload, instclass)

        self.title = N_("Installation Destination")
        self._ready = False
        self._container = None
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self.select_all = False

        self.autopart = None

        # This list gets set up once in initialize and should not be modified
        # except perhaps to add advanced devices. It will remain the full list
        # of disks that can be included in the install.
        self.disks = []
        self.errors = []
        self.warnings = []

        if self.data.zerombr.zerombr and arch.is_s390():
            # if zerombr is specified in a ks file and there are unformatted
            # dasds, automatically format them. pass in storage.devicetree here
            # instead of storage.disks since media_present is checked on disks;
            # a dasd needing dasdfmt will fail this media check though
            to_format = [
                d for d in getDisks(self.storage.devicetree) if
                d.type == "dasd" and blockdev.s390.dasd_needs_format(d.busid)
            ]
            if to_format:
                self.run_dasdfmt(to_format)

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True
Example #4
File: storage.py Project: evol262/anaconda
    def execute(self):
        # Spawn storage execution as a separate thread so there's no big delay
        # going back from this spoke to the hub while StorageChecker.run runs.
        # Yes, this means there's a thread spawning another thread.  Sorry.
        threadMgr.add(AnacondaThread(name=constants.THREAD_EXECUTE_STORAGE,
                                     target=self._doExecute))

        # Register iSCSI to kickstart data
        iscsi_devices = []
        # Find all selected disks and add all iscsi disks to iscsi_devices list
        for d in [d for d in getDisks(self.storage.devicetree) if d.name in self.selected_disks]:
            # Get parents of multipath devices
            if isinstance(d, MultipathDevice):
                for parent_dev in d.parents:
                    if isinstance(parent_dev, iScsiDiskDevice) and not parent_dev.ibft:
                        iscsi_devices.append(parent_dev)
            # Add no-ibft iScsiDiskDevice. IBFT disks are added automatically so there is
            # no need to have them in KS.
            elif isinstance(d, iScsiDiskDevice) and not d.ibft:
                iscsi_devices.append(d)

        if iscsi_devices:
            self.data.iscsiname.iscsiname = self.storage.iscsi.initiator
            # Remove the old iscsi data information and generate new one
            self.data.iscsi.iscsi = []
            for device in iscsi_devices:
                iscsi_data = self._create_iscsi_data(device)
                for saved_iscsi in self.data.iscsi.iscsi:
                    if (iscsi_data.ipaddr == saved_iscsi.ipaddr and
                        iscsi_data.target == saved_iscsi.target and
                        iscsi_data.port == saved_iscsi.port):
                        break
                else:
                    self.data.iscsi.iscsi.append(iscsi_data)
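
The inner loop above leans on Python's for/else: the else branch runs only when the loop finishes without hitting break, which turns it into an "append only if not already saved" check. A standalone illustration with made-up iSCSI tuples:

saved = []
incoming = [("10.0.0.1", "iqn.2015-01.com.example:t0", 3260),
            ("10.0.0.1", "iqn.2015-01.com.example:t0", 3260),   # duplicate
            ("10.0.0.2", "iqn.2015-01.com.example:t1", 3260)]

for candidate in incoming:
    for entry in saved:
        if candidate == entry:
            break                   # already recorded, skip it
    else:
        saved.append(candidate)     # reached only if the loop did not break

print(saved)                        # the duplicate appears once
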
Example #5
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     # on the off-chance dasdfmt is running, we can't proceed further
     threadMgr.wait(constants.THREAD_DASDFMT)
     hubQ.send_message(self.__class__.__name__,
                       _("Saving storage configuration..."))
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartValueError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         applyDiskSelection(self.storage, self.data, self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart:
             self.run()
     finally:
         resetCustomStorageData(self.data)
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
Example #6
    def refresh(self):
        self.disks = getDisks(self.storage.devicetree)

        # synchronize our local data store with the global ksdata
        disk_names = [d.name for d in self.disks]
        # don't put disks with hidden formats in selected_disks
        self.selected_disks = [d for d in self.data.ignoredisk.onlyuse
                                    if d in disk_names]
        self.autopart = self.data.autopart.autopart
        self.autoPartType = self.data.autopart.type
        if self.autoPartType is None:
            # nkwin7 add begin
            # keywords: default partitioning; defaultFS; autopart type; 
            #           add combo; delete refresh button
            #reset autopart type to be AUTOPART_TYPE_PLAIN
            #self.autoPartType = AUTOPART_TYPE_LVM
            self.autoPartType = AUTOPART_TYPE_PLAIN
            # nkwin7 end
        self.encrypted = self.data.autopart.encrypted
        self.passphrase = self.data.autopart.passphrase

        self._previous_autopart = self.autopart

        # First, remove all non-button children.
        for child in self.localOverviews + self.advancedOverviews:
            child.destroy()

        # Then deal with local disks, which are really easy.  They need to be
        # handled here instead of refresh to take into account the user pressing
        # the rescan button on custom partitioning.
        for disk in filter(isLocalDisk, self.disks):
            self._add_disk_overview(disk, self.local_disks_box)

        # Advanced disks are different.  Because there can potentially be a lot
        # of them, we do not display them in the box by default.  Instead, only
        # those selected in the filter UI are displayed.  This means refresh
        # needs to know to create and destroy overviews as appropriate.
        for name in self.data.ignoredisk.onlyuse:
            obj = self.storage.devicetree.getDeviceByName(name, hidden=True)
            if isLocalDisk(obj):
                continue

            self._add_disk_overview(obj, self.specialized_disks_box)

        # update the selections in the ui
        for overview in self.localOverviews + self.advancedOverviews:
            name = overview.get_property("name")
            # nkwin7 add begin
            # keywords:indirect and direct; default selected disks; show correctly messages
            #overview.set_chosen(name in self.selected_disks)
            # We select all disks by default.
            overview.set_chosen(True)
            # nkwin7 end

        self._update_summary()

        if self.errors:
            self.set_warning(_("Error checking storage configuration.  Click for details."))
        elif self.warnings:
            self.set_warning(_("Warning checking storage configuration.  Click for details."))
Example #7
 def _doExecute(self):
     self._ready = False
     # nkwin7 add begin
     # keywords:indirect and direct; default selected disks; show correctly messages
     # the indirect spoke does not need to communicate with the hub.
     #hubQ.send_not_ready(self.__class__.__name__)
     #hubQ.send_message(self.__class__.__name__, _("Saving storage configuration..."))
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except StorageError as e:
         log.error("storage configuration failed: %s" % e)
         StorageChecker.errors = str(e).split("\n")
         #hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         self._applyDiskSelection(self.selected_disks)
     else:
         if self.autopart:
             # this was already run as part of doAutoPartition. dumb.
             StorageChecker.errors = []
             StorageChecker.warnings = []
             self.run()
     finally:
         self._ready = True
Example #8
    def _initialize(self):
        # nkwin7 add begin
        # keywords:indirect and direct; default selected disks; show correctly messages
        # the indirect spoke does not need to communicate with the hub.
        #hubQ.send_message(self.__class__.__name__, _("Probing storage..."))

        threadMgr.wait(constants.THREAD_STORAGE)
        threadMgr.wait(constants.THREAD_CUSTOM_STORAGE_INIT)

        self.disks = getDisks(self.storage.devicetree)

        # if there's only one disk, select it by default
        if len(self.disks) == 1 and not self.selected_disks:
            self._applyDiskSelection([self.disks[0].name])

        self._ready = True

        # We should execute this function here, because we need to wait for
        # THREAD_STORAGE and THREAD_CUSTOM_STORAGE_INIT.
        self._backendExecute()

        # We add a feature that performs automatic partitioning at the
        # beginning of the installation, so we need to wait for
        # THREAD_EXECUTE_STORAGE and THREAD_CHECK_STORAGE.
        threadMgr.wait(constants.THREAD_EXECUTE_STORAGE)
        threadMgr.wait(constants.THREAD_CHECK_STORAGE)

        # We need to update the spoke status in the hub GUI.
        self.spokeClass._ready = True
        self.hubClass._updateCompleteness(self.spokeClass)
Example #9
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     hubQ.send_message(self.__class__.__name__, _("Saving storage configuration..."))
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartValueError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         self._applyDiskSelection(self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart:
             # this was already run as part of doAutoPartition. dumb.
             StorageChecker.errors = []
             StorageChecker.warnings = []
             self.run()
     finally:
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
Example #10
File: storage.py Project: numbnet/anaconda
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     hubQ.send_message(self.__class__.__name__,
                       _("Saving storage configuration..."))
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartValueError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         self._applyDiskSelection(self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart:
             # this was already run as part of doAutoPartition. dumb.
             StorageChecker.errors = []
             StorageChecker.warnings = []
             self.run()
     finally:
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
Example #11
    def run_automatically(storage, data, callback=None):
        """Run the DASD formatting automatically.

        This method could be run in a separate thread.
        """
        if not flags.automatedInstall:
            return

        if not DasdFormatting.is_supported():
            return

        disks = getDisks(storage.devicetree)

        formatting = DasdFormatting()
        formatting.update_restrictions()
        formatting.search_disks(disks)

        if not formatting.should_run():
            return

        if callback:
            formatting.report.connect(callback)

        formatting.run(storage, data)

        if callback:
            formatting.report.disconnect(callback)
Example #12
File: storage.py Project: yaneti/anaconda
    def run_dasdfmt(self):
        """
        Though the same function exists in pyanaconda.ui.gui.spokes.lib.dasdfmt,
        this instance doesn't include any of the UI pieces and should only
        really be getting called on ks installations with "zerombr".
        """
        # wait for the initial storage thread to complete before taking any new
        # actions on storage devices
        threadMgr.wait(constants.THREAD_STORAGE)

        # Use a list (not a generator) so the emptiness check below works.
        to_format = [
            d for d in getDisks(self.storage.devicetree)
            if d.type == "dasd" and blockdev.s390.dasd_needs_format(d.busid)]
        if not to_format:
            # nothing to do here; bail
            return

        hubQ.send_message(self.__class__.__name__, _("Formatting DASDs"))
        for disk in to_format:
            try:
                blockdev.s390.dasd_format(disk.name)
            except blockdev.S390Error as err:
                # Log errors if formatting fails, but don't halt the installer
                log.error(str(err))
                continue
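
The list comprehension above matters because a generator object is always truthy, so an emptiness check against a generator would never take the early return. A two-line demonstration of that pitfall:

empty = (x for x in [])                  # a generator object, even though it yields nothing
print(bool(empty), bool(list(empty)))    # True False
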
Example #13
    def __init__(self, app, data, storage, payload, instclass):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)

        self._ready = False
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self.selection = None

        self.autopart = None
        self.clearPartType = None

        # This list gets set up once in initialize and should not be modified
        # except perhaps to add advanced devices. It will remain the full list
        # of disks that can be included in the install.
        self.disks = []
        self.errors = []
        self.warnings = []

        if self.data.zerombr.zerombr and arch.isS390():
            # if zerombr is specified in a ks file and there are unformatted
            # dasds, automatically format them. pass in storage.devicetree here
            # instead of storage.disks since mediaPresent is checked on disks;
            # a dasd needing dasdfmt will fail this media check though
            to_format = storage.devicetree.make_unformatted_dasd_list(
                getDisks(self.storage.devicetree))
            if to_format:
                self.run_dasdfmt(to_format)

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True
Example #14
    def __init__(self, app, data, storage, payload, instclass):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)

        self._ready = False
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self.selection = None

        self.autopart = None
        self.clearPartType = None

        # This list gets set up once in initialize and should not be modified
        # except perhaps to add advanced devices. It will remain the full list
        # of disks that can be included in the install.
        self.disks = []
        self.errors = []
        self.warnings = []

        if self.data.zerombr.zerombr and arch.isS390():
            # if zerombr is specified in a ks file and there are unformatted
            # dasds, automatically format them. pass in storage.devicetree here
            # instead of storage.disks since mediaPresent is checked on disks;
            # a dasd needing dasdfmt will fail this media check though
            to_format = storage.devicetree.make_unformatted_dasd_list(getDisks(self.storage.devicetree))
            if to_format:
                self.run_dasdfmt(to_format)

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True
Example #15
    def update_disks(self):
        threadMgr.wait(THREAD_STORAGE)

        self.disks = sorted(getDisks(self.storage.devicetree),
                            key=lambda d: d.name)
        # if only one disk is available, go ahead and mark it as selected
        if len(self.disks) == 1:
            self._update_disk_list(self.disks[0])
Example #16
File: storage.py Project: numbnet/anaconda
    def refresh(self):
        self.disks = getDisks(self.storage.devicetree)

        # synchronize our local data store with the global ksdata
        disk_names = [d.name for d in self.disks]
        # don't put disks with hidden formats in selected_disks
        self.selected_disks = [
            d for d in self.data.ignoredisk.onlyuse if d in disk_names
        ]
        self.autopart = self.data.autopart.autopart
        self.autoPartType = self.data.autopart.type
        if self.autoPartType is None:
            self.autoPartType = AUTOPART_TYPE_LVM
        self.encrypted = self.data.autopart.encrypted
        self.passphrase = self.data.autopart.passphrase

        self._previous_autopart = self.autopart

        # First, remove all non-button children.
        for child in self.localOverviews + self.advancedOverviews:
            child.destroy()

        # Then deal with local disks, which are really easy.  They need to be
        # handled here instead of refresh to take into account the user pressing
        # the rescan button on custom partitioning.
        for disk in filter(isLocalDisk, self.disks):
            self._add_disk_overview(disk, self.local_disks_box)

        # Advanced disks are different.  Because there can potentially be a lot
        # of them, we do not display them in the box by default.  Instead, only
        # those selected in the filter UI are displayed.  This means refresh
        # needs to know to create and destroy overviews as appropriate.
        for name in self.data.ignoredisk.onlyuse:
            if name not in disk_names:
                continue
            obj = self.storage.devicetree.getDeviceByName(name, hidden=True)
            if isLocalDisk(obj):
                continue

            self._add_disk_overview(obj, self.specialized_disks_box)

        # update the selections in the ui
        for overview in self.localOverviews + self.advancedOverviews:
            name = overview.get_property("name")
            overview.set_chosen(name in self.selected_disks)

        self._update_summary()

        if self.errors:
            self.set_warning(
                _("Error checking storage configuration.  Click for details."))
        elif self.warnings:
            self.set_warning(
                _("Warning checking storage configuration.  Click for details."
                  ))
Example #17
    def _initialize(self):
        hubQ.send_message(self.__class__.__name__, _("Probing storage..."))

        threadMgr.wait(constants.THREAD_STORAGE)
        threadMgr.wait(constants.THREAD_CUSTOM_STORAGE_INIT)

        self.disks = getDisks(self.storage.devicetree)

        # if there's only one disk, select it by default
        if len(self.disks) == 1 and not self.selected_disks:
            applyDiskSelection(self.storage, self.data, [self.disks[0].name])

        self._ready = True
        hubQ.send_ready(self.__class__.__name__, False)
Example #18
    def _initialize(self):
        hubQ.send_message(self.__class__.__name__, _(constants.PAYLOAD_STATUS_PROBING_STORAGE))

        threadMgr.wait(constants.THREAD_STORAGE)
        threadMgr.wait(constants.THREAD_CUSTOM_STORAGE_INIT)

        self.disks = getDisks(self.storage.devicetree)

        # if there's only one disk, select it by default
        if len(self.disks) == 1 and not self.selected_disks:
            applyDiskSelection(self.storage, self.data, [self.disks[0].name])

        self._ready = True
        hubQ.send_ready(self.__class__.__name__, False)
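
Both _initialize variants end with the same convenience rule: if exactly one disk is present and nothing has been selected yet, select it automatically. A toy standalone version of that rule follows; Disk here is a namedtuple stand-in, not a blivet device class.

from collections import namedtuple

Disk = namedtuple("Disk", ["name"])      # stand-in for a blivet disk object


def default_selection(disks, selected):
    """Auto-select the only disk when nothing has been chosen yet."""
    if len(disks) == 1 and not selected:
        return [disks[0].name]
    return selected


print(default_selection([Disk("sda")], []))                  # ['sda']
print(default_selection([Disk("sda"), Disk("sdb")], []))     # []
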
Example #19
    def refresh(self):
        super().refresh()

        self.disks = getDisks(self.storage.devicetree)

        disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
        self.selected_disks = disk_select_proxy.SelectedDisks

        self.ancestors = [
            d.name for disk in self.disks for d in self._real_ancestors(disk)
        ]

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        nvdimmDisks = []
        zDisks = []

        # Now add all the non-local disks to the store.  Everything has been set up
        # ahead of time, so there's no need to configure anything.  We first make
        # these lists of disks, then call setup on each individual page.  This is
        # because there could be page-specific setup to do that requires a complete
        # view of all the disks on that page.
        for disk in self.disks:
            if self.pages[PAGE_MULTIPATH].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[PAGE_OTHER].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[PAGE_NVDIMM].ismember(disk):
                nvdimmDisks.append(disk)
            elif self.pages[PAGE_Z].ismember(disk):
                zDisks.append(disk)

            allDisks.append(disk)

        self.pages[PAGE_SEARCH].setup(self._store, self.selected_disks,
                                      allDisks)
        self.pages[PAGE_MULTIPATH].setup(self._store, self.selected_disks,
                                         multipathDisks)
        self.pages[PAGE_OTHER].setup(self._store, self.selected_disks,
                                     otherDisks)
        self.pages[PAGE_NVDIMM].setup(self._store, self.selected_disks,
                                      nvdimmDisks)
        self.pages[PAGE_Z].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()
Example #20
    def _initialize(self):
        """
        Secondary initialization: wait for the storage thread to complete
        before populating our disk list.
        """

        threadMgr.wait(THREAD_STORAGE)

        self.disks = sorted(getDisks(self.storage.devicetree),
                            key=lambda d: d.name)
        # if only one disk is available, go ahead and mark it as selected
        if len(self.disks) == 1:
            self._update_disk_list(self.disks[0])

        self._update_summary()
        self._ready = True
Example #21
    def _initialize(self):
        """
        Secondary initialization: wait for the storage thread to complete
        before populating our disk list.
        """

        threadMgr.wait(THREAD_STORAGE)

        self.disks = sorted(getDisks(self.storage.devicetree),
                            key=lambda d: d.name)
        # if only one disk is available, go ahead and mark it as selected
        if len(self.disks) == 1:
            self._update_disk_list(self.disks[0])

        self._update_summary()
        self._ready = True
Example #22
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     # on the off-chance dasdfmt is running, we can't proceed further
     threadMgr.wait(constants.THREAD_DASDFMT)
     hubQ.send_message(self.__class__.__name__,
                       _("Saving storage configuration..."))
     if flags.automatedInstall and self.data.autopart.encrypted and not self.data.autopart.passphrase:
         self.autopart_missing_passphrase = True
         StorageChecker.errors = [
             _("Passphrase for autopart encryption not specified.")
         ]
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
         return
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartParseError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         applyDiskSelection(self.storage, self.data, self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__,
                           _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart or (flags.automatedInstall and
                              (self.data.autopart.autopart
                               or self.data.partition.seen)):
             # run() executes StorageChecker.checkStorage in a separate thread
             self.run()
     finally:
         resetCustomStorageData(self.data)
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
Example #23
    def refresh(self):
        super().refresh()

        self.disks = getDisks(self.storage.devicetree)

        disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
        self.selected_disks = disk_select_proxy.SelectedDisks

        self.ancestors = [d.name for disk in self.disks for d in self._real_ancestors(disk)]

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        nvdimmDisks = []
        zDisks = []

        # Now add all the non-local disks to the store.  Everything has been set up
        # ahead of time, so there's no need to configure anything.  We first make
        # these lists of disks, then call setup on each individual page.  This is
        # because there could be page-specific setup to do that requires a complete
        # view of all the disks on that page.
        for disk in self.disks:
            if self.pages[PAGE_MULTIPATH].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[PAGE_OTHER].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[PAGE_NVDIMM].ismember(disk):
                nvdimmDisks.append(disk)
            elif self.pages[PAGE_Z].ismember(disk):
                zDisks.append(disk)

            allDisks.append(disk)

        self.pages[PAGE_SEARCH].setup(self._store, self.selected_disks, allDisks)
        self.pages[PAGE_MULTIPATH].setup(self._store, self.selected_disks, multipathDisks)
        self.pages[PAGE_OTHER].setup(self._store, self.selected_disks, otherDisks)
        self.pages[PAGE_NVDIMM].setup(self._store, self.selected_disks, nvdimmDisks)
        self.pages[PAGE_Z].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()
Example #24
File: filter.py Project: mykntom/anaconda
    def refresh(self):
        NormalSpoke.refresh(self)

        self.disks = getDisks(self.storage.devicetree)
        self.selected_disks = self.data.ignoredisk.onlyuse[:]

        self.ancestors = itertools.chain(
            *map(self._real_ancestors, self.disks))
        self.ancestors = map(lambda d: d.name, self.ancestors)

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        raidDisks = []
        zDisks = []

        # Now add all the non-local disks to the store.  Everything has been set up
        # ahead of time, so there's no need to configure anything.  We first make
        # these lists of disks, then call setup on each individual page.  This is
        # because there could be page-specific setup to do that requires a complete
        # view of all the disks on that page.
        for disk in itertools.ifilterfalse(isLocalDisk, self.disks):
            if self.pages[1].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[2].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[3].ismember(disk):
                raidDisks.append(disk)
            elif self.pages[4].ismember(disk):
                zDisks.append(disk)

            allDisks.append(disk)

        self.pages[0].setup(self._store, self.selected_disks, allDisks)
        self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
        self.pages[2].setup(self._store, self.selected_disks, otherDisks)
        self.pages[3].setup(self._store, self.selected_disks, raidDisks)
        self.pages[4].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()
Example #25
    def refresh(self):
        NormalSpoke.refresh(self)

        self.disks = getDisks(self.storage.devicetree)
        self.selected_disks = self.data.ignoredisk.onlyuse[:]

        self.ancestors = itertools.chain(*map(self._real_ancestors, self.disks))
        self.ancestors = map(lambda d: d.name, self.ancestors)

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        raidDisks = []
        zDisks = []

        # Now add all the non-local disks to the store.  Everything has been set up
        # ahead of time, so there's no need to configure anything.  We first make
        # these lists of disks, then call setup on each individual page.  This is
        # because there could be page-specific setup to do that requires a complete
        # view of all the disks on that page.
        for disk in itertools.ifilterfalse(isLocalDisk, self.disks):
            if self.pages[1].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[2].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[3].ismember(disk):
                raidDisks.append(disk)
            elif self.pages[4].ismember(disk):
                zDisks.append(disk)

            allDisks.append(disk)

        self.pages[0].setup(self._store, self.selected_disks, allDisks)
        self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
        self.pages[2].setup(self._store, self.selected_disks, otherDisks)
        self.pages[3].setup(self._store, self.selected_disks, raidDisks)
        self.pages[4].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()
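
A portability note on the two refresh() examples above: itertools.ifilterfalse is the Python 2 name, renamed to itertools.filterfalse in Python 3, where map also returns a lazy iterator rather than a list. The same non-local filtering could be written for Python 3 roughly as below; is_local_disk is a placeholder predicate standing in for pyanaconda's isLocalDisk.

from itertools import filterfalse        # Python 3 name for ifilterfalse


def is_local_disk(name):
    """Placeholder predicate standing in for pyanaconda's isLocalDisk()."""
    return name.startswith("sd")


disks = ["sda", "sdb", "dm-mpatha", "dasda"]
non_local = list(filterfalse(is_local_disk, disks))
print(non_local)                         # ['dm-mpatha', 'dasda']
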
Example #26
 def _doExecute(self):
     self._ready = False
     hubQ.send_not_ready(self.__class__.__name__)
     # on the off-chance dasdfmt is running, we can't proceed further
     threadMgr.wait(constants.THREAD_DASDFMT)
     hubQ.send_message(self.__class__.__name__, _("Saving storage configuration..."))
     if flags.automatedInstall and self.data.autopart.encrypted and not self.data.autopart.passphrase:
         self.autopart_missing_passphrase = True
         StorageChecker.errors = [_("Passphrase for autopart encryption not specified.")]
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
         return
     try:
         doKickstartStorage(self.storage, self.data, self.instclass)
     except (StorageError, KickstartParseError) as e:
         log.error("storage configuration failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
         self.data.ignoredisk.drives = []
         self.data.ignoredisk.onlyuse = []
         self.storage.config.update(self.data)
         self.storage.reset()
         self.disks = getDisks(self.storage.devicetree)
         # now set ksdata back to the user's specified config
         applyDiskSelection(self.storage, self.data, self.selected_disks)
     except BootLoaderError as e:
         log.error("BootLoader setup failed: %s", e)
         StorageChecker.errors = str(e).split("\n")
         hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration..."))
         self.data.bootloader.bootDrive = ""
     else:
         if self.autopart or (flags.automatedInstall and (self.data.autopart.autopart or self.data.partition.seen)):
             # run() executes StorageChecker.checkStorage in a separate thread
             self.run()
     finally:
         resetCustomStorageData(self.data)
         self._ready = True
         hubQ.send_ready(self.__class__.__name__, True)
Example #27
File: storage.py Project: yaneti/anaconda
    def execute(self):
        # Spawn storage execution as a separate thread so there's no big delay
        # going back from this spoke to the hub while StorageChecker.run runs.
        # Yes, this means there's a thread spawning another thread.  Sorry.
        threadMgr.add(
            AnacondaThread(name=constants.THREAD_EXECUTE_STORAGE,
                           target=self._doExecute))

        # Register iSCSI to kickstart data
        iscsi_devices = []
        # Find all selected disks and add all iscsi disks to iscsi_devices list
        for d in [
                d for d in getDisks(self.storage.devicetree)
                if d.name in self.selected_disks
        ]:
            # Get parents of multipath devices
            if isinstance(d, MultipathDevice):
                for parent_dev in d.parents:
                    if isinstance(parent_dev,
                                  iScsiDiskDevice) and not parent_dev.ibft:
                        iscsi_devices.append(parent_dev)
            # Add no-ibft iScsiDiskDevice. IBFT disks are added automatically so there is
            # no need to have them in KS.
            elif isinstance(d, iScsiDiskDevice) and not d.ibft:
                iscsi_devices.append(d)

        if iscsi_devices:
            self.data.iscsiname.iscsiname = self.storage.iscsi.initiator
            # Remove the old iscsi data information and generate new one
            self.data.iscsi.iscsi = []
            for device in iscsi_devices:
                iscsi_data = self._create_iscsi_data(device)
                for saved_iscsi in self.data.iscsi.iscsi:
                    if (iscsi_data.ipaddr == saved_iscsi.ipaddr
                            and iscsi_data.target == saved_iscsi.target
                            and iscsi_data.port == saved_iscsi.port):
                        break
                else:
                    self.data.iscsi.iscsi.append(iscsi_data)
Example #28
    def run_dasdfmt(self):
        """
        Though the same function exists in pyanaconda.ui.gui.spokes.lib.dasdfmt,
        this instance doesn't include any of the UI pieces and should only
        really be getting called on ks installations with "zerombr".
        """
        # wait for the initial storage thread to complete before taking any new
        # actions on storage devices
        threadMgr.wait(constants.THREAD_STORAGE)

        to_format = self.storage.devicetree.make_unformatted_dasd_list(
            getDisks(self.storage.devicetree))
        if not to_format:
            # nothing to do here; bail
            return

        hubQ.send_message(self.__class__.__name__, _("Formatting DASDs"))
        for disk in to_format:
            try:
                blockdev.s390.dasd_format(disk.name)
            except blockdev.S390Error as err:
                # Log errors if formatting fails, but don't halt the installer
                log.error(str(err))
                continue
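
The formatting loop above deliberately logs failures and continues rather than aborting, so one bad DASD does not stop the rest from being formatted. The same error-tolerant loop in isolation; format_one and the device names below are made up for the sketch.

import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger(__name__)


def format_all(devices, format_one):
    """Format every device, logging failures instead of raising."""
    for name in devices:
        try:
            format_one(name)
        except OSError as err:            # stands in for blockdev.S390Error
            log.error("formatting %s failed: %s", name, err)
            continue


if __name__ == "__main__":
    format_all(["dasda", "dasdb"], lambda name: None)
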
Example #29
    def on_back_clicked(self, button):
        # We can't exit early if it looks like nothing has changed because the
        # user might want to change settings presented in the dialogs shown from
        # within this method.

        # Do not enter this method multiple times if the user clicks the back
        # button repeatedly
        if self._back_clicked:
            return
        else:
            self._back_clicked = True

        # make sure the snapshot of unmodified on-disk-storage model is created
        if not on_disk_storage.created:
            on_disk_storage.create_snapshot(self.storage)

        if self.autopart_missing_passphrase:
            self._setup_passphrase()
            NormalSpoke.on_back_clicked(self, button)
            return

        # No disks selected?  The user wants to back out of the storage spoke.
        if not self.selected_disks:
            NormalSpoke.on_back_clicked(self, button)
            return

        disk_selection_changed = False
        if self._last_selected_disks:
            disk_selection_changed = (self._last_selected_disks != set(self.selected_disks))

        # remember the disk selection for future decisions
        self._last_selected_disks = set(self.selected_disks)

        if disk_selection_changed:
            # Changing disk selection is really, really complicated and has
            # always been causing numerous hard bugs. Let's not play the hero
            # game and just revert everything and start over again.
            on_disk_storage.reset_to_snapshot(self.storage)
            self.disks = getDisks(self.storage.devicetree)
        else:
            # Remove all non-existing devices if autopart was active when we last
            # refreshed.
            if self._previous_autopart:
                self._previous_autopart = False
                self._remove_nonexistant_partitions()

        # hide disks as requested
        self._hide_disks()

        # make sure no containers were split up by the user's disk selection
        self.clear_info()

        # if there are disk selection errors we don't let the user leave the
        # spoke, so these errors don't have to go to self.errors
        self.disks_errors = checkDiskSelection(self.storage, self.selected_disks)
        if self.disks_errors:
            # The disk selection has to make sense before we can proceed.
            self.set_error(_("There was a problem with your disk selection. "
                             "Click here for details."))
            self._back_clicked = False
            return

        if arch.isS390():
            # check for unformatted DASDs and launch dasdfmt if any discovered
            rc = self._check_dasd_formats()
            if rc == DASD_FORMAT_NO_CHANGE:
                pass
            elif rc == DASD_FORMAT_REFRESH:
                # User hit OK on the dialog
                self.refresh()
            elif rc == DASD_FORMAT_RETURN_TO_HUB:
                # User clicked uri to return to hub.
                NormalSpoke.on_back_clicked(self, button)
                return
            else:
                # User either hit cancel on the dialog or closed it via escape,
                # there was no formatting done.
                self._back_clicked = False
                return

        # even if they're not doing autopart, setting autopart.encrypted
        # establishes a default of encrypting new devices
        self.encrypted = self._encrypted.get_active()

        # We might first need to ask about an encryption passphrase.
        if self.encrypted and not self._setup_passphrase():
            self._back_clicked = False
            return

        # At this point there are three possible states:
        # 1) user chose custom part => just send them to the CustomPart spoke
        # 2) user wants to reclaim some more space => run the ResizeDialog
        # 3) we are just asked to do autopart => check free space and see if we need
        #                                        user to do anything more
        self.autopart = not self._customPart.get_active()
        disks = [d for d in self.disks if d.name in self.selected_disks]
        dialog = None
        if not self.autopart:
            self.skipTo = "CustomPartitioningSpoke"
        elif self._reclaim.get_active():
            # HINT: change the logic of this 'if' statement if we are asked to
            # support "reclaim before custom partitioning"

            # respect disk selection and other choices in the ReclaimDialog
            self.apply()
            dialog = ResizeDialog(self.data, self.storage, self.payload)
            dialog.refresh(disks)
        else:
            dialog = self._check_space_and_get_dialog(disks)

        if dialog:
            # more dialogs may need to be run based on user choices, but we are
            # only interested in the final result
            rc = self._run_dialogs(disks, start_with=dialog)

            if rc == RESPONSE_OK:
                # nothing special needed
                pass
            elif rc == RESPONSE_CANCEL:
                # A cancel button was clicked on one of the dialogs.  Stay on this
                # spoke.  Generally, this is because the user wants to add more disks.
                self._back_clicked = False
                return
            elif rc == RESPONSE_MODIFY_SW:
                # The "Fedora software selection" link was clicked on one of the
                # dialogs.  Send the user to the software spoke.
                self.skipTo = "SoftwareSelectionSpoke"
            elif rc == RESPONSE_QUIT:
                # Not enough space, and the user can't do anything about it so
                # they chose to quit.
                raise SystemExit("user-selected exit")
            else:
                # I don't know how we'd get here, but might as well have a
                # catch-all.  Just stay on this spoke.
                self._back_clicked = False
                return

        if self.autopart:
            refreshAutoSwapSize(self.storage)
        self.applyOnSkip = True
        NormalSpoke.on_back_clicked(self, button)
Example #30
    def refresh(self):
        self._back_clicked = False

        self.disks = getDisks(self.storage.devicetree)

        # synchronize our local data store with the global ksdata
        disk_names = [d.name for d in self.disks]
        self.selected_disks = [d for d in self.data.ignoredisk.onlyuse
                               if d in disk_names]

        # unhide previously hidden disks so that they don't look empty
        # (because all of their child devices are hidden)
        self._unhide_disks()

        self.autopart = self.data.autopart.autopart
        self.autoPartType = self.data.autopart.type
        if self.autoPartType is None:
            self.autoPartType = AUTOPART_TYPE_LVM
        self.encrypted = self.data.autopart.encrypted
        self.passphrase = self.data.autopart.passphrase

        self._previous_autopart = self.autopart

        # First, remove all non-button children.
        for child in self.localOverviews + self.advancedOverviews:
            child.destroy()

        # Then deal with local disks, which are really easy.  They need to be
        # handled here instead of refresh to take into account the user pressing
        # the rescan button on custom partitioning.
        for disk in filter(isLocalDisk, self.disks):
            # While technically local disks, zFCP devices are specialized
            # storage and should not be shown here.
            if disk.type != "zfcp":
                self._add_disk_overview(disk, self.local_disks_box)

        # Advanced disks are different.  Because there can potentially be a lot
        # of them, we do not display them in the box by default.  Instead, only
        # those selected in the filter UI are displayed.  This means refresh
        # needs to know to create and destroy overviews as appropriate.
        for name in self.data.ignoredisk.onlyuse:
            if name not in disk_names:
                continue
            obj = self.storage.devicetree.getDeviceByName(name, hidden=True)
            # since zfcp devices may be detected as local disks when added
            # manually, specifically check the disk type here to make sure
            # we won't accidentally bypass adding zfcp devices to the disk
            # overview
            if isLocalDisk(obj) and obj.type != "zfcp":
                continue

            self._add_disk_overview(obj, self.specialized_disks_box)

        # update the selections in the ui
        for overview in self.localOverviews + self.advancedOverviews:
            name = overview.get_property("name")
            overview.set_chosen(name in self.selected_disks)

        # if encrypted is specified in kickstart, select the encryptionCheckbox in the GUI
        if self.encrypted:
            self._encrypted.set_active(True)

        self._customPart.set_active(not self.autopart)

        self._update_summary()

        if self.errors:
            self.set_warning(_("Error checking storage configuration.  <a href=\"\">Click for details.</a>"))
        elif self.warnings:
            self.set_warning(_("Warning checking storage configuration.  <a href=\"\">Click for details.</a>"))
Example #31
    def refresh(self):
        self._back_clicked = False

        self.disks = getDisks(self.storage.devicetree)

        # synchronize our local data store with the global ksdata
        disk_names = [d.name for d in self.disks]
        self.selected_disks = [
            d for d in self.data.ignoredisk.onlyuse if d in disk_names
        ]

        # unhide previously hidden disks so that they don't look empty
        # (because all of their child devices are hidden)
        self._unhide_disks()

        self.autopart = self.data.autopart.autopart
        self.autoPartType = self.data.autopart.type
        if self.autoPartType is None:
            self.autoPartType = AUTOPART_TYPE_LVM
        self.encrypted = self.data.autopart.encrypted
        self.passphrase = self.data.autopart.passphrase

        self._previous_autopart = self.autopart

        # First, remove all non-button children.
        for child in self.localOverviews + self.advancedOverviews:
            child.destroy()

        # Then deal with local disks, which are really easy.  They need to be
        # handled here instead of refresh to take into account the user pressing
        # the rescan button on custom partitioning.
        for disk in filter(isLocalDisk, self.disks):
            # While technically local disks, zFCP devices are specialized
            # storage and should not be shown here.
            if disk.type != "zfcp":
                self._add_disk_overview(disk, self.local_disks_box)

        # Advanced disks are different.  Because there can potentially be a lot
        # of them, we do not display them in the box by default.  Instead, only
        # those selected in the filter UI are displayed.  This means refresh
        # needs to know to create and destroy overviews as appropriate.
        for name in self.data.ignoredisk.onlyuse:
            if name not in disk_names:
                continue
            obj = self.storage.devicetree.getDeviceByName(name, hidden=True)
            # since zfcp devices may be detected as local disks when added
            # manually, specifically check the disk type here to make sure
            # we won't accidentally bypass adding zfcp devices to the disk
            # overview
            if isLocalDisk(obj) and obj.type != "zfcp":
                continue

            self._add_disk_overview(obj, self.specialized_disks_box)

        # update the selections in the ui
        for overview in self.localOverviews + self.advancedOverviews:
            name = overview.get_property("name")
            overview.set_chosen(name in self.selected_disks)

        # if encrypted is specified in kickstart, select the encryptionCheckbox in the GUI
        if self.encrypted:
            self._encrypted.set_active(True)

        self._customPart.set_active(not self.autopart)

        self._update_summary()

        if self.errors:
            self.set_warning(
                _("Error checking storage configuration.  <a href=\"\">Click for details.</a>"
                  ))
        elif self.warnings:
            self.set_warning(
                _("Warning checking storage configuration.  <a href=\"\">Click for details.</a>"
                  ))
Example #32
    def run_dasdfmt(self, to_format=None):
        """
        This generates the list of DASDs requiring dasdfmt and runs dasdfmt
        against them.

        to_format is an optional list of DASDs to format. This shouldn't be
        passed if run_dasdfmt is called during a ks installation, and if called
        during a manual installation, a list of DASDs needs to be passed.
        """
        if not to_format:
            # go ahead and initialize this
            to_format = []

        # if the storage thread is running, wait on it to complete before taking
        # any further actions on devices; most likely to occur if user has
        # zerombr in their ks file
        threadMgr.wait(THREAD_STORAGE)

        if flags.automatedInstall:
            # automated install case
            unformatted = []
            ldl = []

            if self.data.zerombr.zerombr:
                # unformatted DASDs
                unformatted += make_unformatted_dasd_list(
                    [d.name for d in getDisks(self.storage.devicetree)])
            if self.data.clearpart.cdl:
                # LDL DASDs
                ldl += [
                    d.name for d in self.storage.devicetree.dasd
                    if is_ldl_dasd(d.name)
                ]
            # combine into one nice list
            to_format = list(set(unformatted + ldl))
        else:
            # manual install; ask to verify they want to run dasdfmt
            # prepare our msg strings; copied directly from dasdfmt.glade
            summary = _(
                "The following unformatted or LDL DASDs have been "
                "detected on your system. You can choose to format them "
                "now with dasdfmt or cancel to leave them unformatted. "
                "Unformatted DASDs cannot be used during installation.\n\n")

            warntext = _(
                "Warning: All storage changes made using the installer will be lost when you choose to format.\n\nProceed to run dasdfmt?\n"
            )

            displaytext = summary + "\n".join(
                "/dev/" + d for d in to_format) + "\n" + warntext

            # now show actual prompt; note -- in cmdline mode, auto-answer for
            # this is 'no', so unformatted and ldl DASDs will remain so unless
            # zerombr or cdl are added to the ks file
            question_window = YesNoDialog(self._app, displaytext)
            self._app.switch_screen_modal(question_window)
            if not question_window.answer:
                # no? well fine then, back to the storage spoke with you;
                return None

        for disk in to_format:
            try:
                print(_("Formatting /dev/%s. This may take a moment.") % disk)
                format_dasd(disk)
            except DasdFormatError as err:
                # Log errors if formatting fails, but don't halt the installer
                log.error("dasdfmt /dev/%s failed: %s", disk, err)
                continue

        # need to make devicetree aware of disk changes
        self.storage.devicetree.populate()
        if not flags.automatedInstall:
            # reinit storage
            threadMgr.add(
                AnacondaThread(
                    name=THREAD_STORAGE,
                    target=storageInitialize,
                    args=(self.storage, self.data,
                          self.storage.devicetree.protectedDevNames)))
            # update the summary screen with the changes
            self._initialize()
Example #33
    def on_back_clicked(self, button):
        # We can't exit early if it looks like nothing has changed because the
        # user might want to change settings presented in the dialogs shown from
        # within this method.

        # Do not enter this method multiple times if the user clicks the back
        # button repeatedly
        if self._back_clicked:
            return
        else:
            self._back_clicked = True

        # make sure the snapshot of unmodified on-disk-storage model is created
        if not on_disk_storage.created:
            on_disk_storage.create_snapshot(self.storage)

        if self.autopart_missing_passphrase:
            self._setup_passphrase()
            NormalSpoke.on_back_clicked(self, button)
            return

        # No disks selected?  The user wants to back out of the storage spoke.
        if not self.selected_disks:
            NormalSpoke.on_back_clicked(self, button)
            return

        disk_selection_changed = False
        if self._last_selected_disks:
            disk_selection_changed = (self._last_selected_disks != set(
                self.selected_disks))

        # remember the disk selection for future decisions
        self._last_selected_disks = set(self.selected_disks)

        if disk_selection_changed:
            # Changing disk selection is really, really complicated and has
            # always been causing numerous hard bugs. Let's not play the hero
            # game and just revert everything and start over again.
            on_disk_storage.reset_to_snapshot(self.storage)
            self.disks = getDisks(self.storage.devicetree)
        else:
            # Remove all non-existing devices if autopart was active when we last
            # refreshed.
            if self._previous_autopart:
                self._previous_autopart = False
                self._remove_nonexistant_partitions()

        # hide disks as requested
        self._hide_disks()

        # make sure no containers were split up by the user's disk selection
        self.clear_info()

        # if there are disk selection errors we don't let the user leave the
        # spoke, so these errors don't have to go to self.errors
        self.disks_errors = checkDiskSelection(self.storage,
                                               self.selected_disks)
        if self.disks_errors:
            # The disk selection has to make sense before we can proceed.
            self.set_error(
                _("There was a problem with your disk selection. "
                  "Click here for details."))
            self._back_clicked = False
            return

        if arch.isS390():
            # check for unformatted DASDs and launch dasdfmt if any discovered
            rc = self._check_dasd_formats()
            if rc == DASD_FORMAT_NO_CHANGE:
                pass
            elif rc == DASD_FORMAT_REFRESH:
                # User hit OK on the dialog
                self.refresh()
            elif rc == DASD_FORMAT_RETURN_TO_HUB:
                # User clicked uri to return to hub.
                NormalSpoke.on_back_clicked(self, button)
                return
            else:
                # User either hit cancel on the dialog or closed it via escape,
                # there was no formatting done.
                self._back_clicked = False
                return

        # even if they're not doing autopart, setting autopart.encrypted
        # establishes a default of encrypting new devices
        self.encrypted = self._encrypted.get_active()

        # We might first need to ask about an encryption passphrase.
        if self.encrypted and not self._setup_passphrase():
            self._back_clicked = False
            return

        # At this point there are three possible states:
        # 1) user chose custom part => just send them to the CustomPart spoke
        # 2) user wants to reclaim some more space => run the ResizeDialog
        # 3) we are just asked to do autopart => check free space and see if we need
        #                                        user to do anything more
        self.autopart = not self._customPart.get_active()
        disks = [d for d in self.disks if d.name in self.selected_disks]
        dialog = None
        if not self.autopart:
            self.skipTo = "CustomPartitioningSpoke"
        elif self._reclaim.get_active():
            # HINT: change the logic of this 'if' statement if we are asked to
            # support "reclaim before custom partitioning"

            # respect disk selection and other choices in the ReclaimDialog
            self.apply()
            dialog = ResizeDialog(self.data, self.storage, self.payload)
            dialog.refresh(disks)
        else:
            dialog = self._check_space_and_get_dialog(disks)

        if dialog:
            # more dialogs may need to be run based on user choices, but we are
            # only interested in the final result
            rc = self._run_dialogs(disks, start_with=dialog)

            if rc == RESPONSE_OK:
                # nothing special needed
                pass
            elif rc == RESPONSE_CANCEL:
                # A cancel button was clicked on one of the dialogs.  Stay on this
                # spoke.  Generally, this is because the user wants to add more disks.
                self._back_clicked = False
                return
            elif rc == RESPONSE_MODIFY_SW:
                # The "Fedora software selection" link was clicked on one of the
                # dialogs.  Send the user to the software spoke.
                self.skipTo = "SoftwareSelectionSpoke"
            elif rc == RESPONSE_QUIT:
                # Not enough space, and the user can't do anything about it so
                # they chose to quit.
                raise SystemExit("user-selected exit")
            else:
                # I don't know how we'd get here, but might as well have a
                # catch-all.  Just stay on this spoke.
                self._back_clicked = False
                return

        if self.autopart:
            refreshAutoSwapSize(self.storage)
        self.applyOnSkip = True
        NormalSpoke.on_back_clicked(self, button)