Example #1
def get_platform():
    """Check the architecture of the system and return an instance of a
       Platform subclass to match.  If the architecture could not be determined,
       raise an exception."""
    if arch.is_ppc():
        ppc_machine = arch.get_ppc_machine()

        if (ppc_machine == "PMac" and arch.get_ppc_mac_gen() == "NewWorld"):
            return NewWorldPPC()
        elif ppc_machine in ["iSeries", "pSeries"]:
            return IPSeriesPPC()
        elif ppc_machine == "PowerNV":
            return PowerNV()
        elif ppc_machine == "PS3":
            return PS3()
        else:
            raise SystemError("Unsupported PPC machine type: %s" % ppc_machine)
    elif arch.is_s390():
        return S390()
    elif arch.is_efi():
        if arch.is_mactel():
            return MacEFI()
        elif arch.is_aarch64():
            return Aarch64EFI()
        elif arch.is_arm():
            return ArmEFI()
        else:
            return EFI()
    elif arch.is_x86():
        return X86()
    elif arch.is_arm():
        return ARM()
    else:
        raise SystemError("Could not determine system architecture.")
Example #2
    def initialize(self):
        super().initialize()
        self.initialize_start()

        self.pages = {
            PAGE_SEARCH: SearchPage(self.storage, self.builder),
            PAGE_MULTIPATH: MultipathPage(self.storage, self.builder),
            PAGE_OTHER: OtherPage(self.storage, self.builder),
            PAGE_NVDIMM: NvdimmPage(self.storage, self.builder),
            PAGE_Z: ZPage(self.storage, self.builder),
        }

        self._notebook = self.builder.get_object("advancedNotebook")

        if not arch.is_s390():
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()
            self.builder.get_object("addDASDButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        if not iscsi.available:
            self.builder.get_object("addISCSIButton").destroy()


        self._store = self.builder.get_object("diskStore")
        self._addDisksButton = self.builder.get_object("addDisksButton")

        # The button is sensitive only on NVDIMM page
        self._reconfigureNVDIMMButton.set_sensitive(False)

        # report that we are done
        self.initialize_done()
Example #3
    def setup(self, store, selected_names, disks):
        """ Set up our Z-page, but only if we're running on s390x. """
        if not arch.is_s390():
            return

        ccws = []
        wwpns = []
        luns = []

        for disk in disks:
            paths = [d.name for d in disk.parents]
            selected = disk.name in selected_names

            if getattr(disk, "type") != "zfcp":
                continue

            # remember to store all of the zfcp-related junk so we can
            # see it in the UI
            if disk.fcp_lun not in luns:
                luns.append(disk.fcp_lun)
            if disk.wwpn not in wwpns:
                wwpns.append(disk.wwpn)
            if disk.hba_id not in ccws:
                ccws.append(disk.hba_id)

            store.append([
                True, selected, not disk.protected,
                disk.name, "", disk.model, str(disk.size),
                disk.vendor, disk.bus, disk.serial, "", "\n".join(paths),
                "", "", disk.fcp_lun, disk.hba_id, disk.wwpn, "", ""
            ])

        self._setup_search_type()
Example #4
def _reset_storage(storage):
    """Do reset the storage.

    FIXME: Call the DBus task instead of this function.

    :param storage: an instance of the Blivet's storage object
    """
    # Set the ignored and exclusive disks.
    disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    storage.ignored_disks = disk_select_proxy.IgnoredDisks
    storage.exclusive_disks = disk_select_proxy.ExclusiveDisks
    storage.protected_devices = disk_select_proxy.ProtectedDevices
    storage.disk_images = disk_select_proxy.DiskImages

    # Reload additional modules.
    if not conf.target.is_image:
        iscsi.startup()

        fcoe_proxy = STORAGE.get_proxy(FCOE)
        fcoe_proxy.ReloadModule()

        if arch.is_s390():
            zfcp_proxy = STORAGE.get_proxy(ZFCP)
            zfcp_proxy.ReloadModule()

    # Do the reset.
    storage.reset()
Example #5
def time_initialize(timezone_proxy, storage):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.
    Guess is done by searching for bootable ntfs devices.

    :param timezone_proxy: DBus proxy of the timezone module
    :param storage: pyanaconda.storage.InstallerStorage instance
    """

    if arch.is_s390():
        # nothing to do on s390(x) where hwclock doesn't exist
        return

    if not timezone_proxy.IsUTC and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
                           storage.devices)

        timezone_proxy.SetIsUTC(not storage.bootloader.has_windows(ntfs_devs))

    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone_proxy.IsUTC:
        args.append("--utc")
    else:
        args.append("--localtime")

    util.execWithRedirect(cmd, args)
Example #6
def verify_s390_constraints(storage, constraints, report_error, report_warning):
    """ Verify constraints for s390x.

        Prevent users from installing on s390x with (a) no /boot volume, (b) the
        root volume on LVM, and (c) the root volume not restricted to a single
        PV

        NOTE: There is not really a way for users to create a / volume
        restricted to a single PV.  The backend support is there, but there are
        no UI hook-ups to drive that functionality, but I do not personally
        care.  --dcantrell

        :param storage: a storage to check
        :param constraints: a dictionary of constraints
        :param report_error: a function for error reporting
        :param report_warning: a function for warning reporting
    """
    root = storage.fsset.root_device

    if arch.is_s390() and '/boot' not in storage.mountpoints and root:
        if root.type == 'lvmlv' and not root.single_pv:
            report_error(_("This platform requires /boot on a dedicated "
                           "partition or logical volume. If you do not "
                           "want a /boot volume, you must place / on a "
                           "dedicated non-LVM partition."))
    def initialize(self):
        NormalSpoke.initialize(self)
        self.initialize_start()

        self.pages = [
            SearchPage(self.storage, self.builder),
            MultipathPage(self.storage, self.builder),
            OtherPage(self.storage, self.builder),
            ZPage(self.storage, self.builder)
        ]

        self._notebook = self.builder.get_object("advancedNotebook")

        if not arch.is_s390():
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()
            self.builder.get_object("addDASDButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        if not iscsi.available:
            self.builder.get_object("addISCSIButton").destroy()

        self._store = self.builder.get_object("diskStore")
        self._addDisksButton = self.builder.get_object("addDisksButton")

        # report that we are done
        self.initialize_done()
Example #8
def write_storage_configuration(storage, sysroot=None):
    """Write the storage configuration to sysroot.

    :param storage: the storage object
    :param sysroot: a path to the target OS installation
    """
    if sysroot is None:
        sysroot = util.getSysroot()

    if not os.path.isdir("%s/etc" % sysroot):
        os.mkdir("%s/etc" % sysroot)

    _write_escrow_packets(storage, sysroot)

    storage.make_mtab()
    storage.fsset.write()
    iscsi.write(sysroot, storage)

    fcoe_proxy = STORAGE.get_proxy(FCOE)
    fcoe_proxy.WriteConfiguration(sysroot)

    if arch.is_s390():
        zfcp_proxy = STORAGE.get_proxy(ZFCP)
        zfcp_proxy.WriteConfiguration(sysroot)

    _write_dasd_conf(storage, sysroot)
Example #9
def time_initialize(timezone, storage, bootloader):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.
    Guess is done by searching for bootable ntfs devices.

    :param timezone: ksdata.timezone object
    :param storage: blivet.Blivet instance
    :param bootloader: bootloader.Bootloader instance

    """

    if arch.is_s390():
        # nothing to do on s390(x) where hwclock doesn't exist
        return

    if not timezone.isUtc and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
                           storage.devices)

        timezone.isUtc = not bootloader.has_windows(ntfs_devs)

    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--localtime")

    iutil.execWithRedirect(cmd, args)
Example #10
    def __init__(self, data, storage, payload, instclass):
        NormalTUISpoke.__init__(self, data, storage, payload, instclass)

        self.title = N_("Installation Destination")
        self._ready = False
        self._container = None
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self.select_all = False

        self.autopart = None

        # This list gets set up once in initialize and should not be modified
        # except perhaps to add advanced devices. It will remain the full list
        # of disks that can be included in the install.
        self.disks = []
        self.errors = []
        self.warnings = []

        if self.data.zerombr.zerombr and arch.is_s390():
            # if zerombr is specified in a ks file and there are unformatted
            # dasds, automatically format them. pass in storage.devicetree here
            # instead of storage.disks since media_present is checked on disks;
            # a dasd needing dasdfmt will fail this media check though
            to_format = [
                d for d in getDisks(self.storage.devicetree) if
                d.type == "dasd" and blockdev.s390.dasd_needs_format(d.busid)
            ]
            if to_format:
                self.run_dasdfmt(to_format)

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True
Example #11
def write_storage_configuration(storage, sysroot=None):
    """Write the storage configuration to sysroot.

    :param storage: the storage object
    :param sysroot: a path to the target OS installation
    """
    if sysroot is None:
        sysroot = conf.target.system_root

    if not os.path.isdir("%s/etc" % sysroot):
        os.mkdir("%s/etc" % sysroot)

    _write_escrow_packets(storage, sysroot)

    storage.make_mtab()
    storage.fsset.write()

    iscsi_proxy = STORAGE.get_proxy(ISCSI)
    iscsi_proxy.WriteConfiguration()

    fcoe_proxy = STORAGE.get_proxy(FCOE)
    fcoe_proxy.WriteConfiguration()

    if arch.is_s390():
        zfcp_proxy = STORAGE.get_proxy(ZFCP)
        zfcp_proxy.WriteConfiguration()

    _write_dasd_conf(storage, sysroot)
Example #12
def reset_storage(storage):
    """Reset the storage.

    FIXME: A temporary workaround for the UI.

    :param storage: an instance of the Blivet's storage object
    """
    # Update the config.
    update_storage_config(storage.config)

    # Set the ignored and exclusive disks.
    disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    storage.ignored_disks = disk_select_proxy.IgnoredDisks
    storage.exclusive_disks = disk_select_proxy.SelectedDisks

    # Reload additional modules.
    if not conf.target.is_image:
        iscsi.startup()

        fcoe_proxy = STORAGE.get_proxy(FCOE)
        fcoe_proxy.ReloadModule()

        if arch.is_s390():
            zfcp_proxy = STORAGE.get_proxy(ZFCP)
            zfcp_proxy.ReloadModule()

    # Do the reset.
    storage.reset()
Example #13
def verify_s390_constraints(storage, constraints, report_error, report_warning):
    """ Verify constraints for s390x.

        Prevent users from installing on s390x with (a) no /boot volume, (b) the
        root volume on LVM, and (c) the root volume not restricted to a single
        PV

        NOTE: There is not really a way for users to create a / volume
        restricted to a single PV.  The backend support is there, but there are
        no UI hook-ups to drive that functionality, but I do not personally
        care.  --dcantrell

        :param storage: a storage to check
        :param constraints: a dictionary of constraints
        :param report_error: a function for error reporting
        :param report_warning: a function for warning reporting
    """
    root = storage.fsset.root_device

    if arch.is_s390() and '/boot' not in storage.mountpoints and root:
        if root.type == 'lvmlv' and not root.single_pv:
            report_error(_("This platform requires /boot on a dedicated "
                           "partition or logical volume. If you do not "
                           "want a /boot volume, you must place / on a "
                           "dedicated non-LVM partition."))
Example #14
    def initialize(self):
        super().initialize()
        self.initialize_start()

        self.pages = {
            PAGE_SEARCH: SearchPage(self.storage, self.builder),
            PAGE_MULTIPATH: MultipathPage(self.storage, self.builder),
            PAGE_OTHER: OtherPage(self.storage, self.builder),
            PAGE_NVDIMM: NvdimmPage(self.storage, self.builder),
            PAGE_Z: ZPage(self.storage, self.builder),
        }

        self._notebook = self.builder.get_object("advancedNotebook")

        if not arch.is_s390():
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()
            self.builder.get_object("addDASDButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        if not iscsi.available:
            self.builder.get_object("addISCSIButton").destroy()

        self._store = self.builder.get_object("diskStore")

        # The button is sensitive only on NVDIMM page
        self._reconfigureNVDIMMButton.set_sensitive(False)

        # report that we are done
        self.initialize_done()
Example #15
def verify_s390_constraints(storage, constraints, report_error, report_warning):
    """ Verify constraints for s390x.

        Prevent users from installing on s390x with (a) no /boot volume, (b) the
        root volume on LVM, (c) the root volume not restricted to a single PV,
        and (d) LDL DASD disks.

        NOTE: There is not really a way for users to create a / volume
        restricted to a single PV.  The backend support is there, but there are
        no UI hook-ups to drive that functionality, but I do not personally
        care.  --dcantrell

        :param storage: a storage to check
        :param constraints: a dictionary of constraints
        :param report_error: a function for error reporting
        :param report_warning: a function for warning reporting
    """
    if not arch.is_s390():
        return

    root = storage.fsset.root_device
    if '/boot' not in storage.mountpoints and root:
        if root.type == 'lvmlv' and not root.single_pv:
            report_error(_("This platform requires /boot on a dedicated "
                           "partition or logical volume. If you do not "
                           "want a /boot volume, you must place / on a "
                           "dedicated non-LVM partition."))

    for disk in storage.disks:
        if disk.type == "dasd" and blockdev.s390.dasd_is_ldl(disk.name):
            report_error(_("The LDL DASD disk {name} ({busid}) cannot be used "
                           "for the installation. Please format it.")
                         .format(name="/dev/" + disk.name, busid=disk.busid))
Example #16
    def initialize(self):
        NormalSpoke.initialize(self)
        self.initialize_start()

        self.pages = [SearchPage(self.storage, self.builder),
                      MultipathPage(self.storage, self.builder),
                      OtherPage(self.storage, self.builder),
                      ZPage(self.storage, self.builder)]

        self._notebook = self.builder.get_object("advancedNotebook")

        if not arch.is_s390():
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()
            self.builder.get_object("addDASDButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        if not iscsi.available:
            self.builder.get_object("addISCSIButton").destroy()

        self._store = self.builder.get_object("diskStore")
        self._addDisksButton = self.builder.get_object("addDisksButton")

        # report that we are done
        self.initialize_done()
Example #17
def get_platform():
    """Check the architecture of the system and return an instance of a
       Platform subclass to match.  If the architecture could not be determined,
       raise an exception."""
    if arch.is_ppc():
        ppc_machine = arch.get_ppc_machine()

        if (ppc_machine == "PMac" and arch.get_ppc_mac_gen() == "NewWorld"):
            return NewWorldPPC()
        elif ppc_machine in ["iSeries", "pSeries"]:
            return IPSeriesPPC()
        elif ppc_machine == "PS3":
            return PS3()
        else:
            raise SystemError("Unsupported PPC machine type: %s" % ppc_machine)
    elif arch.is_s390():
        return S390()
    elif arch.is_efi():
        if arch.is_mactel():
            return MacEFI()
        elif arch.is_aarch64():
            return Aarch64EFI()
        elif arch.is_arm():
            return ArmEFI()
        else:
            return EFI()
    elif arch.is_x86():
        return X86()
    elif arch.is_arm():
        return ARM()
    else:
        raise SystemError("Could not determine system architecture.")
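
The example above (like Example #1) maps architecture probes to Platform subclasses and raises SystemError when nothing matches. As a rough usage sketch, not taken from these examples, a caller might catch that error and fall back gracefully; the helper name detect_platform and the logging setup below are assumptions for illustration only:

import logging

log = logging.getLogger(__name__)

def detect_platform():
    # Return a Platform instance from get_platform() above, or None when the
    # architecture cannot be determined (SystemError is raised in that case).
    try:
        return get_platform()
    except SystemError as exc:
        log.warning("Platform detection failed: %s", exc)
        return None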
Example #18
def time_initialize(timezone_proxy):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.
    Guess is done by searching for bootable ntfs devices.

    :param timezone_proxy: DBus proxy of the timezone module
    """
    if arch.is_s390():
        # nothing to do on s390(x) where hwclock doesn't exist
        return

    if not timezone_proxy.IsUTC and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
        is_utc = not bootloader_proxy.DetectWindows()
        timezone_proxy.SetIsUTC(is_utc)

    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone_proxy.IsUTC:
        args.append("--utc")
    else:
        args.append("--localtime")

    util.execWithRedirect(cmd, args)
Example #19
    def _write_etc_adjtime(self):
        """Write /etc/adjtime contents.

        :raise: TimezoneConfigurationError
        """
        if arch.is_s390():
            # there is no HW clock on s390(x)
            return

        try:
            with open(os.path.normpath(self._sysroot + "/etc/adjtime"),
                      "r") as fobj:
                lines = fobj.readlines()
        except IOError:
            lines = ["0.0 0 0.0\n", "0\n"]

        try:
            with open(os.path.normpath(self._sysroot + "/etc/adjtime"),
                      "w") as fobj:
                fobj.write(lines[0])
                fobj.write(lines[1])
                if self._is_utc:
                    fobj.write("UTC\n")
                else:
                    fobj.write("LOCAL\n")
        except IOError as ioerr:
            msg = "Error while writing /etc/adjtime file: {}".format(
                ioerr.strerror)
            raise TimezoneConfigurationError(msg)
Example #20
def verify_s390_constraints(storage, constraints, report_error, report_warning):
    """ Verify constraints for s390x.

        Prevent users from installing on s390x with (a) no /boot volume, (b) the
        root volume on LVM, (c) the root volume not restricted to a single PV,
        and (d) LDL DASD disks.

        NOTE: There is not really a way for users to create a / volume
        restricted to a single PV.  The backend support is there, but there are
        no UI hook-ups to drive that functionality, but I do not personally
        care.  --dcantrell

        :param storage: a storage to check
        :param constraints: a dictionary of constraints
        :param report_error: a function for error reporting
        :param report_warning: a function for warning reporting
    """
    if not arch.is_s390():
        return

    root = storage.fsset.root_device
    if '/boot' not in storage.mountpoints and root:
        if root.type == 'lvmlv' and not root.single_pv:
            report_error(_("This platform requires /boot on a dedicated "
                           "partition or logical volume. If you do not "
                           "want a /boot volume, you must place / on a "
                           "dedicated non-LVM partition."))

    for disk in storage.disks:
        if disk.type == "dasd" and blockdev.s390.dasd_is_ldl(disk.name):
            report_error(_("The LDL DASD disk {name} ({busid}) cannot be used "
                           "for the installation. Please format it.")
                         .format(name="/dev/" + disk.name, busid=disk.busid))
Example #21
    def __init__(self):
        super().__init__()
        # We need this so all the /dev/disk/* stuff is set up.
        udev.trigger(subsystem="block", action="change")

        self._modules = []

        self._disk_init_module = DiskInitializationModule()
        self._add_module(self._disk_init_module)

        self._disk_selection_module = DiskSelectionModule()
        self._add_module(self._disk_selection_module)

        self._bootloader_module = BootloaderModule()
        self._add_module(self._bootloader_module)

        self._auto_part_module = AutoPartitioningModule()
        self._add_module(self._auto_part_module)

        self._manual_part_module = ManualPartitioningModule()
        self._add_module(self._manual_part_module)

        self._dasd_module = None
        self._zfcp_module = None

        if arch.is_s390():
            self._dasd_module = DASDModule()
            self._add_module(self._dasd_module)

            self._zfcp_module = ZFCPModule()
            self._add_module(self._zfcp_module)
Example #22
def time_initialize(timezone, storage, bootloader):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.
    Guess is done by searching for bootable ntfs devices.

    :param timezone: ksdata.timezone object
    :param storage: blivet.Blivet instance
    :param bootloader: bootloader.Bootloader instance

    """

    if arch.is_s390():
        # nothing to do on s390(x) where hwclock doesn't exist
        return

    if not timezone.isUtc and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
                           storage.devices)

        timezone.isUtc = not bootloader.has_windows(ntfs_devs)

    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--localtime")

    iutil.execWithRedirect(cmd, args)
Example #23
def _reset_storage(storage):
    """Do reset the storage.

    FIXME: Call the DBus task instead of this function.

    :param storage: an instance of the Blivet's storage object
    """
    # Set the ignored and exclusive disks.
    disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
    storage.ignored_disks = disk_select_proxy.IgnoredDisks
    storage.exclusive_disks = disk_select_proxy.ExclusiveDisks
    storage.protected_devices = disk_select_proxy.ProtectedDevices
    storage.disk_images = disk_select_proxy.DiskImages

    # Reload additional modules.
    if not conf.target.is_image:
        iscsi_proxy = STORAGE.get_proxy(ISCSI)
        iscsi_proxy.ReloadModule()

        fcoe_proxy = STORAGE.get_proxy(FCOE)
        fcoe_proxy.ReloadModule()

        if arch.is_s390():
            zfcp_proxy = STORAGE.get_proxy(ZFCP)
            zfcp_proxy.ReloadModule()

    # Do the reset.
    storage.reset()
Example #24
    def _initialize_zipl_secure_boot(self):
        if not arch.is_s390():
            self._secure_boot_box.hide()
            return

        secure_boot = self._bootloader_module.ZIPLSecureBoot
        self._secure_boot_combo.set_active_id(secure_boot)
Example #25
    def __init__(self, app, data, storage, payload, instclass):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)

        self._ready = False
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self.selection = None

        self.autopart = None
        self.clearPartType = None

        # This list gets set up once in initialize and should not be modified
        # except perhaps to add advanced devices. It will remain the full list
        # of disks that can be included in the install.
        self.disks = []
        self.errors = []
        self.warnings = []

        if self.data.zerombr.zerombr and arch.is_s390():
            # if zerombr is specified in a ks file and there are unformatted
            # dasds, automatically format them. pass in storage.devicetree here
            # instead of storage.disks since media_present is checked on disks;
            # a dasd needing dasdfmt will fail this media check though
            to_format = [d for d in getDisks(self.storage.devicetree)
                         if d.type == "dasd" and blockdev.s390.dasd_needs_format(d.busid)]
            if to_format:
                self.run_dasdfmt(to_format)

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True
Example #26
    def __init__(self):
        super().__init__()
        # Initialize Blivet.
        enable_installer_mode()

        # The storage model.
        self._storage = None
        self.storage_changed = Signal()

        # Initialize modules.
        self._modules = []

        self._disk_init_module = DiskInitializationModule()
        self._add_module(self._disk_init_module)

        self._disk_selection_module = DiskSelectionModule()
        self._add_module(self._disk_selection_module)

        self._snapshot_module = SnapshotModule()
        self._add_module(self._snapshot_module)

        self._bootloader_module = BootloaderModule()
        self._add_module(self._bootloader_module)

        self._fcoe_module = FCOEModule()
        self._add_module(self._fcoe_module)

        self._nvdimm_module = NVDIMMModule()
        self._add_module(self._nvdimm_module)

        self._dasd_module = None
        self._zfcp_module = None

        if arch.is_s390():
            self._dasd_module = DASDModule()
            self._add_module(self._dasd_module)

            self._zfcp_module = ZFCPModule()
            self._add_module(self._zfcp_module)

        # Initialize the partitioning modules.
        self._partitioning_modules = {}

        self._auto_part_module = AutoPartitioningModule()
        self._add_partitioning_module(AUTO_PARTITIONING.object_path,
                                      self._auto_part_module)

        self._manual_part_module = ManualPartitioningModule()
        self._add_partitioning_module(MANUAL_PARTITIONING.object_path,
                                      self._manual_part_module)

        self._custom_part_module = CustomPartitioningModule()
        self._add_partitioning_module(CUSTOM_PARTITIONING.object_path,
                                      self._custom_part_module)

        # Connect modules to signals.
        self.storage_changed.connect(self._snapshot_module.on_storage_reset)
Example #27
    def _reload_modules(self):
        """Reload the additional modules."""
        if conf.target.is_image:
            return

        iscsi.startup()
        fcoe.startup()

        if arch.is_s390():
            zfcp.startup()
Example #28
    def _reload_modules(self):
        """Reload the additional modules."""
        if conf.target.is_image:
            return

        iscsi.startup()
        fcoe.startup()

        if arch.is_s390():
            zfcp.startup()
Example #29
    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("zModel")
        self.model.set_visible_func(self.visible_func)

        self._ccwEntry = self.builder.get_object("zCCWEntry")
        self._wwpnEntry = self.builder.get_object("zWWPNEntry")
        self._lunEntry = self.builder.get_object("zLUNEntry")
        self._combo = self.builder.get_object("zTypeCombo")

        self._isS390 = arch.is_s390()
Example #30
    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("zModel")
        self.model.set_visible_func(self.visible_func)

        self._ccwEntry = self.builder.get_object("zCCWEntry")
        self._wwpnEntry = self.builder.get_object("zWWPNEntry")
        self._lunEntry = self.builder.get_object("zLUNEntry")
        self._combo = self.builder.get_object("zTypeCombo")

        self._isS390 = arch.is_s390()
Example #31
def write_timezone_config(timezone_proxy, root):
    """
    Write timezone configuration for the system specified by root.

    :param timezone_proxy: DBus proxy of the timezone module
    :param root: path to the root
    :raise: TimezoneConfigError

    """

    # we want to create a relative symlink
    tz_file = "/usr/share/zoneinfo/" + timezone_proxy.Timezone
    rooted_tz_file = os.path.normpath(root + tz_file)
    relative_path = os.path.normpath("../" + tz_file)
    link_path = os.path.normpath(root + "/etc/localtime")

    if not os.access(rooted_tz_file, os.R_OK):
        log.error("Timezone to be linked (%s) doesn't exist", rooted_tz_file)
    else:
        try:
            # os.symlink fails if link_path exists, so try to remove it first
            os.remove(link_path)
        except OSError:
            pass

        try:
            os.symlink(relative_path, link_path)
        except OSError as oserr:
            log.error("Error when symlinking timezone (from %s): %s",
                      rooted_tz_file, oserr.strerror)

    if arch.is_s390():
        # there is no HW clock on s390(x)
        return

    try:
        fobj = open(os.path.normpath(root + "/etc/adjtime"), "r")
        lines = fobj.readlines()
        fobj.close()
    except IOError:
        lines = ["0.0 0 0.0\n", "0\n"]

    try:
        with open(os.path.normpath(root + "/etc/adjtime"), "w") as fobj:
            fobj.write(lines[0])
            fobj.write(lines[1])
            if timezone_proxy.IsUTC:
                fobj.write("UTC\n")
            else:
                fobj.write("LOCAL\n")
    except IOError as ioerr:
        msg = "Error while writing /etc/adjtime file: %s" % ioerr.strerror
        raise TimezoneConfigError(msg)
Example #32
    def input(self, args, key):
        """Grab the disk choice and update things"""
        self.errors = []
        try:
            keyid = int(key) - 1
            if keyid < 0:
                return key
            self.selection = keyid
            if len(self.disks) > 1 and keyid == len(self.disks):
                self._select_all_disks()
            else:
                self._update_disk_list(self.disks[keyid])
            return INPUT_PROCESSED
        except (ValueError, IndexError):
            # TRANSLATORS: 'c' to continue
            if key.lower() == C_('TUI|Spoke Navigation', 'c'):
                if self.selected_disks:
                    # check selected disks to see if we have any unformatted DASDs
                    # if we're on s390x, since they need to be formatted before we
                    # can use them.
                    if arch.is_s390():
                        _disks = [
                            d for d in self.disks
                            if d.name in self.selected_disks
                        ]
                        to_format = [
                            d for d in _disks if d.type == "dasd"
                            and blockdev.s390.dasd_needs_format(d.busid)
                        ]
                        if to_format:
                            self.run_dasdfmt(to_format)
                            return None

                    # make sure no containers were split up by the user's disk
                    # selection
                    self.errors.extend(
                        checkDiskSelection(self.storage, self.selected_disks))
                    if self.errors:
                        # The disk selection has to make sense before we can
                        # proceed.
                        return None

                    newspoke = AutoPartSpoke(self.app, self.data, self.storage,
                                             self.payload, self.instclass)
                    self.app.switch_screen_modal(newspoke)
                    self.apply()
                    self.execute()
                    self.close()
                return INPUT_PROCESSED
            else:
                return super(StorageSpoke, self).input(args, key)
Example #33
    def run(self):
        """Perform the actual work of setting the Hardware Clock from the System Clock."""
        if arch.is_s390():
            log.debug("There is not Hardware Clock on s390x.")
            return

        if conf.system.can_set_hardware_clock:
            cmd = "hwclock"
            args = ["--systohc"]
            if self._is_utc:
                args.append("--utc")
            else:
                args.append("--local")

            util.execWithRedirect(cmd, args)
Example #34
    def input(self, args, key):
        """Grab the disk choice and update things"""
        self.errors = []
        if self._container.process_user_input(key):
            self.redraw()
            return InputState.PROCESSED
        else:
            # TRANSLATORS: 'c' to continue
            if key.lower() == C_('TUI|Spoke Navigation', 'c'):
                if self.selected_disks:
                    # check selected disks to see if we have any unformatted DASDs
                    # if we're on s390x, since they need to be formatted before we
                    # can use them.
                    if arch.is_s390():
                        _disks = [
                            d for d in self.disks
                            if d.name in self.selected_disks
                        ]
                        to_format = [
                            d for d in _disks if d.type == "dasd"
                            and blockdev.s390.dasd_needs_format(d.busid)
                        ]
                        if to_format:
                            self.run_dasdfmt(to_format)
                            self.redraw()
                            return InputState.PROCESSED

                    # make sure no containers were split up by the user's disk
                    # selection
                    self.errors.extend(
                        checkDiskSelection(self.storage, self.selected_disks))
                    if self.errors:
                        # The disk selection has to make sense before we can
                        # proceed.
                        self.redraw()
                        return InputState.PROCESSED

                    self.apply()
                    new_spoke = PartTypeSpoke(self.data, self.storage,
                                              self.payload, self.instclass)
                    ScreenHandler.push_screen_modal(new_spoke)
                    self.apply()
                    self.execute()
                    self.close()

                return InputState.PROCESSED
            else:
                return super(StorageSpoke, self).input(args, key)
Example #35
    def run(self):
        """Run the task."""
        if not arch.is_s390():
            log.debug("ZIPL can be run only on s390x.")
            return

        if conf.target.is_directory:
            log.debug(
                "The bootloader installation is disabled for dir installations."
            )
            return

        if self._mode == BootloaderMode.DISABLED:
            log.debug("The bootloader installation is disabled.")
            return

        execInSysroot("zipl", [])
Example #36
    def input(self, args, key):
        """Grab the disk choice and update things"""
        self.errors = []
        try:
            keyid = int(key) - 1
            if keyid < 0:
                return key
            self.selection = keyid
            if len(self.disks) > 1 and keyid == len(self.disks):
                self._select_all_disks()
            else:
                self._update_disk_list(self.disks[keyid])
            return INPUT_PROCESSED
        except (ValueError, IndexError):
            # TRANSLATORS: 'c' to continue
            if key.lower() == C_('TUI|Spoke Navigation', 'c'):
                if self.selected_disks:
                    # check selected disks to see if we have any unformatted DASDs
                    # if we're on s390x, since they need to be formatted before we
                    # can use them.
                    if arch.is_s390():
                        _disks = [d for d in self.disks if d.name in self.selected_disks]
                        to_format = [d for d in _disks if d.type == "dasd" and
                                     blockdev.s390.dasd_needs_format(d.busid)]
                        if to_format:
                            self.run_dasdfmt(to_format)
                            return None

                    # make sure no containers were split up by the user's disk
                    # selection
                    self.errors.extend(checkDiskSelection(self.storage,
                                                          self.selected_disks))
                    if self.errors:
                        # The disk selection has to make sense before we can
                        # proceed.
                        return None

                    newspoke = AutoPartSpoke(self.app, self.data, self.storage,
                                             self.payload, self.instclass)
                    self.app.switch_screen_modal(newspoke)
                    self.apply()
                    self.execute()
                    self.close()
                return INPUT_PROCESSED
            else:
                return super(StorageSpoke, self).input(args, key)
Example #37
def verify_s390_constraints(storage, constraints, report_error, report_warning):
    """ Verify constraints for s390x.

        Prevent users from installing on s390x with LDL DASD disks.

        :param storage: a storage to check
        :param constraints: a dictionary of constraints
        :param report_error: a function for error reporting
        :param report_warning: a function for warning reporting
    """
    if not arch.is_s390():
        return

    for disk in storage.disks:
        if disk.type == "dasd" and blockdev.s390.dasd_is_ldl(disk.name):
            report_error(_("The LDL DASD disk {name} ({busid}) cannot be used "
                           "for the installation. Please format it.")
                         .format(name="/dev/" + disk.name, busid=disk.busid))
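
A small sketch of how such a constraint check could be wired to simple collectors; the list-based reporters below are an assumption for illustration, and storage stands for a populated storage object as in the examples above (real callers would pass their own reporting callbacks):

errors = []
warnings = []

# verify_s390_constraints() only calls the report functions; it returns nothing.
verify_s390_constraints(
    storage,                     # assumed: a populated storage object
    constraints={},              # this particular check does not read any constraint keys
    report_error=errors.append,
    report_warning=warnings.append,
)

for message in errors:
    print(message)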
Example #38
def save_hw_clock(timezone):
    """
    Save system time to HW clock.

    :param timezone: ksdata.timezone object

    """

    if arch.is_s390():
        return

    cmd = "hwclock"
    args = ["--systohc"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--local")

    iutil.execWithRedirect(cmd, args)
Example #39
def save_hw_clock(timezone):
    """
    Save system time to HW clock.

    :param timezone: ksdata.timezone object

    """

    if arch.is_s390():
        return

    cmd = "hwclock"
    args = ["--systohc"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--local")

    iutil.execWithRedirect(cmd, args)
Example #40
    def _write_dasd_conf(self, storage, sysroot):
        """Write DASD configuration to sysroot.

        Write /etc/dasd.conf to target system for all DASD devices
        configured during installation.

        :param storage: the storage object
        :param sysroot: a path to the target OS installation
        """
        dasds = [d for d in storage.devices if d.type == "dasd"]
        dasds.sort(key=lambda d: d.name)
        if not (arch.is_s390() and dasds):
            return

        with open(os.path.realpath(sysroot + "/etc/dasd.conf"), "w") as f:
            for dasd in dasds:
                fields = [dasd.busid] + dasd.get_opts()
                f.write("%s\n" % " ".join(fields), )

        # check for hyper PAV aliases; they need to get added to dasd.conf as well
        sysfs = "/sys/bus/ccw/drivers/dasd-eckd"

        # in the case that someone is installing with *only* FBA DASDs, the above
        # sysfs path will not exist; so check for it and just bail out of here if
        # that's the case
        if not os.path.exists(sysfs):
            return

        # this does catch every DASD, even non-aliases, but we're only going to be
        # checking for a very specific flag, so there won't be any duplicate entries
        # in dasd.conf
        devs = [d for d in os.listdir(sysfs) if d.startswith("0.0")]
        with open(os.path.realpath(sysroot + "/etc/dasd.conf"), "a") as f:
            for d in devs:
                aliasfile = "%s/%s/alias" % (sysfs, d)
                with open(aliasfile, "r") as falias:
                    alias = falias.read().strip()

                # if alias == 1, then the device is an alias; otherwise it is a
                # normal dasd (alias == 0) and we can skip it, since it will have
                # been added to dasd.conf in the above block of code
                if alias == "1":
                    f.write("%s\n" % d)
Example #41
def _write_dasd_conf(storage, sysroot):
    """Write DASD configuration to sysroot.

    Write /etc/dasd.conf to target system for all DASD devices
    configured during installation.

    :param storage: the storage object
    :param sysroot: a path to the target OS installation
    """
    dasds = [d for d in storage.devices if d.type == "dasd"]
    dasds.sort(key=lambda d: d.name)
    if not (arch.is_s390() and dasds):
        return

    with open(os.path.realpath(sysroot + "/etc/dasd.conf"), "w") as f:
        for dasd in dasds:
            fields = [dasd.busid] + dasd.get_opts()
            f.write("%s\n" % " ".join(fields),)

    # check for hyper PAV aliases; they need to get added to dasd.conf as well
    sysfs = "/sys/bus/ccw/drivers/dasd-eckd"

    # in the case that someone is installing with *only* FBA DASDs, the above
    # sysfs path will not exist; so check for it and just bail out of here if
    # that's the case
    if not os.path.exists(sysfs):
        return

    # this does catch every DASD, even non-aliases, but we're only going to be
    # checking for a very specific flag, so there won't be any duplicate entries
    # in dasd.conf
    devs = [d for d in os.listdir(sysfs) if d.startswith("0.0")]
    with open(os.path.realpath(sysroot + "/etc/dasd.conf"), "a") as f:
        for d in devs:
            aliasfile = "%s/%s/alias" % (sysfs, d)
            with open(aliasfile, "r") as falias:
                alias = falias.read().strip()

            # if alias == 1, then the device is an alias; otherwise it is a
            # normal dasd (alias == 0) and we can skip it, since it will have
            # been added to dasd.conf in the above block of code
            if alias == "1":
                f.write("%s\n" % d)
Example #42
def enable_installer_mode():
    """Configure Blivet for use by Anaconda."""
    blivet_util.program_log_lock = program_log_lock

    # always enable the debug mode when in the installer mode so that we
    # have more data in the logs for rare cases that are hard to reproduce
    blivet_flags.debug = True

    # We don't want image installs writing backups of the *image* metadata
    # into the *host's* /etc/lvm. This can get real messy on build systems.
    if conf.target.is_image:
        blivet_flags.lvm_metadata_backup = False

    # Set the flags.
    blivet_flags.auto_dev_updates = True
    blivet_flags.selinux_reset_fcon = True
    blivet_flags.keep_empty_ext_partitions = False
    blivet_flags.discard_new = True
    blivet_flags.selinux = conf.security.selinux
    blivet_flags.dmraid = conf.storage.dmraid
    blivet_flags.ibft = conf.storage.ibft
    blivet_flags.multipath_friendly_names = conf.storage.multipath_friendly_names
    blivet_flags.allow_imperfect_devices = conf.storage.allow_imperfect_devices
    blivet_flags.btrfs_compression = conf.storage.btrfs_compression

    # Platform class setup depends on flags, re-initialize it.
    _set_default_label_type()

    # Set the minimum required entropy.
    luks_data.min_entropy = crypto.MIN_CREATE_ENTROPY

    # Load plugins.
    if arch.is_s390():
        _load_plugin_s390()

    # Set the device name regexes to ignore.
    udev.ignored_device_names = [
        r'^mtd', r'^mmcblk.+boot', r'^mmcblk.+rpmb', r'^zram', '^ndblk'
    ]

    # We need this so all the /dev/disk/* stuff is set up.
    udev.trigger(subsystem="block", action="change")
Example #43
    def input(self, args, key):
        """Grab the disk choice and update things"""
        self.errors = []
        if self._container.process_user_input(key):
            self.redraw()
            return InputState.PROCESSED
        else:
            # TRANSLATORS: 'c' to continue
            if key.lower() == C_('TUI|Spoke Navigation', 'c'):
                if self.selected_disks:
                    # check selected disks to see if we have any unformatted DASDs
                    # if we're on s390x, since they need to be formatted before we
                    # can use them.
                    if arch.is_s390():
                        _disks = [d for d in self.disks if d.name in self.selected_disks]
                        to_format = [d for d in _disks if d.type == "dasd" and
                                     blockdev.s390.dasd_needs_format(d.busid)]
                        if to_format:
                            self.run_dasdfmt(to_format)
                            self.redraw()
                            return InputState.PROCESSED

                    # make sure no containers were split up by the user's disk
                    # selection
                    self.errors.extend(checkDiskSelection(self.storage,
                                                          self.selected_disks))
                    if self.errors:
                        # The disk selection has to make sense before we can
                        # proceed.
                        self.redraw()
                        return InputState.PROCESSED

                    new_spoke = AutoPartSpoke(self.data, self.storage,
                                              self.payload, self.instclass)
                    ScreenHandler.push_screen_modal(new_spoke)
                    self.apply()
                    self.execute()
                    self.close()

                return InputState.PROCESSED
            else:
                return super(StorageSpoke, self).input(args, key)
Example #44
    def __init__(self):
        super().__init__()
        self._modules = []

        self._disk_init_module = DiskInitializationModule()
        self._add_module(self._disk_init_module)

        self._disk_selection_module = DiskSelectionModule()
        self._add_module(self._disk_selection_module)

        self._bootloader_module = BootloaderModule()
        self._add_module(self._bootloader_module)

        self._autopart_module = AutoPartitioningModule()
        self._add_module(self._autopart_module)

        if arch.is_s390():
            self._dasd_module = DASDModule()
            self._add_module(self._dasd_module)
        else:
            self._dasd_module = None
Example #45
def save_hw_clock(timezone_proxy=None):
    """
    Save system time to HW clock.

    :param timezone_proxy: DBus proxy of the timezone module

    """
    if arch.is_s390():
        return

    if not timezone_proxy:
        timezone_proxy = TIMEZONE.get_proxy()

    cmd = "hwclock"
    args = ["--systohc"]
    if timezone_proxy.IsUTC:
        args.append("--utc")
    else:
        args.append("--local")

    util.execWithRedirect(cmd, args)
Example #46
    def __init__(self, *args, **kwargs):
        StorageChecker.__init__(self, min_ram=isys.MIN_GUI_RAM)
        NormalSpoke.__init__(self, *args, **kwargs)
        self.applyOnSkip = True

        self._ready = False
        self.autoPartType = None
        self.encrypted = False
        self.passphrase = ""
        self.selected_disks = self.data.ignoredisk.onlyuse[:]
        self._last_selected_disks = None
        self._back_clicked = False
        self.autopart_missing_passphrase = False
        self.disks_errors = []

        # This list contains all possible disks that can be included in the install.
        # All types of advanced disks should be set up for us ahead of time, so
        # there should be no need to modify this list.
        self.disks = []

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True

        self.autopart = self.data.autopart.autopart
        self.autoPartType = None
        self.clearPartType = CLEARPART_TYPE_NONE

        if self.data.zerombr.zerombr and arch.is_s390():
            # run dasdfmt on any unformatted DASDs automatically
            threadMgr.add(AnacondaThread(name=constants.THREAD_DASDFMT,
                            target=self.run_dasdfmt))

        self._previous_autopart = False

        self._last_clicked_overview = None
        self._cur_clicked_overview = None

        self._grabObjects()
Example #47
def enable_installer_mode():
    """Configure Blivet for use by Anaconda."""
    blivet_util.program_log_lock = program_log_lock

    # always enable the debug mode when in the installer mode so that we
    # have more data in the logs for rare cases that are hard to reproduce
    blivet_flags.debug = True

    # We don't want image installs writing backups of the *image* metadata
    # into the *host's* /etc/lvm. This can get real messy on build systems.
    if conf.target.is_image:
        blivet_flags.lvm_metadata_backup = False

    # Set the flags.
    blivet_flags.auto_dev_updates = True
    blivet_flags.selinux_reset_fcon = True
    blivet_flags.keep_empty_ext_partitions = False
    blivet_flags.discard_new = True
    blivet_flags.selinux = conf.security.selinux
    blivet_flags.dmraid = conf.storage.dmraid
    blivet_flags.ibft = conf.storage.ibft
    blivet_flags.multipath_friendly_names = conf.storage.multipath_friendly_names
    blivet_flags.allow_imperfect_devices = conf.storage.allow_imperfect_devices

    # Platform class setup depends on flags, re-initialize it.
    platform.update_from_flags()

    # Load plugins.
    if arch.is_s390():
        load_plugin_s390()

    # Set the blacklist.
    udev.device_name_blacklist = [r'^mtd', r'^mmcblk.+boot', r'^mmcblk.+rpmb', r'^zram', '^ndblk']

    # We need this so all the /dev/disk/* stuff is set up.
    udev.trigger(subsystem="block", action="change")
Example #48
def is_supported():
    """Is DASD formatting supported on this machine?"""
    return arch.is_s390()
Example #49
    def __init__(self):
        super().__init__()
        # Initialize Blivet.
        enable_installer_mode()

        # The storage model.
        self._storage = None
        self.storage_changed = Signal()

        # Initialize modules.
        self._modules = []

        self._device_tree_module = DeviceTreeModule()
        self._add_module(self._device_tree_module)

        self._disk_init_module = DiskInitializationModule()
        self._add_module(self._disk_init_module)

        self._disk_selection_module = DiskSelectionModule()
        self._add_module(self._disk_selection_module)

        self._snapshot_module = SnapshotModule()
        self._add_module(self._snapshot_module)

        self._bootloader_module = BootloaderModule()
        self._add_module(self._bootloader_module)

        self._fcoe_module = FCOEModule()
        self._add_module(self._fcoe_module)

        self._nvdimm_module = NVDIMMModule()
        self._add_module(self._nvdimm_module)

        self._dasd_module = None
        self._zfcp_module = None

        if arch.is_s390():
            self._dasd_module = DASDModule()
            self._add_module(self._dasd_module)

            self._zfcp_module = ZFCPModule()
            self._add_module(self._zfcp_module)

        # Initialize the partitioning modules.
        self._partitioning_modules = {}

        self._auto_part_module = AutoPartitioningModule()
        self._add_partitioning_module(AUTO_PARTITIONING.object_path, self._auto_part_module)

        self._manual_part_module = ManualPartitioningModule()
        self._add_partitioning_module(MANUAL_PARTITIONING.object_path, self._manual_part_module)

        self._custom_part_module = CustomPartitioningModule()
        self._add_partitioning_module(CUSTOM_PARTITIONING.object_path, self._custom_part_module)

        self._blivet_part_module = BlivetPartitioningModule()
        self._add_partitioning_module(BLIVET_PARTITIONING.object_path, self._blivet_part_module)

        # Connect modules to signals.
        self.storage_changed.connect(
            self._device_tree_module.on_storage_reset
        )
        self.storage_changed.connect(
            self._disk_init_module.on_storage_reset
        )
        self.storage_changed.connect(
            self._disk_selection_module.on_storage_reset
        )
        self.storage_changed.connect(
            self._snapshot_module.on_storage_reset
        )
        self.storage_changed.connect(
            self._bootloader_module.on_storage_reset
        )
        self._disk_selection_module.protected_devices_changed.connect(
            self.on_protected_devices_changed
        )
Example #50
def doConfiguration(storage, payload, ksdata):
    """Configure the installed system."""

    configuration_queue = TaskQueue("Configuration queue")
    # connect progress reporting
    configuration_queue.queue_started.connect(lambda x: progress_message(x.status_message))
    configuration_queue.task_completed.connect(lambda x: progress_step(x.name))

    # schedule the execute methods of ksdata that require an installed system to be present
    os_config = TaskQueue("Installed system configuration", N_("Configuring installed system"))
    os_config.append(Task("Configure authselect", ksdata.authselect.execute))
    os_config.append(Task("Configure SELinux", ksdata.selinux.execute))
    os_config.append(Task("Configure first boot tasks", ksdata.firstboot.execute))
    os_config.append(Task("Configure services", ksdata.services.execute))
    os_config.append(Task("Configure keyboard", ksdata.keyboard.execute))
    os_config.append(Task("Configure timezone", ksdata.timezone.execute))
    os_config.append(Task("Configure language", ksdata.lang.execute))
    os_config.append(Task("Configure firewall", ksdata.firewall.execute))
    os_config.append(Task("Configure X", ksdata.xconfig.execute))
    configuration_queue.append(os_config)

    # schedule network configuration (if required)
    if conf.system.provides_network_config:
        network_config = TaskQueue("Network configuration", N_("Writing network configuration"))
        network_config.append(Task("Network configuration",
                                   ksdata.network.execute, (payload, )))
        configuration_queue.append(network_config)

    # creating users and groups requires some pre-configuration.
    u = Users()
    user_config = TaskQueue("User creation", N_("Creating users"))
    user_config.append(Task("Configure root", ksdata.rootpw.execute, (storage, ksdata, u)))
    user_config.append(Task("Configure user groups", ksdata.group.execute, (storage, ksdata, u)))
    user_config.append(Task("Configure user", ksdata.user.execute, (storage, ksdata, u)))
    user_config.append(Task("Configure SSH key", ksdata.sshkey.execute, (storage, ksdata, u)))
    configuration_queue.append(user_config)

    # Anaconda addon configuration
    addon_config = TaskQueue("Anaconda addon configuration", N_("Configuring addons"))
    addon_config.append(Task("Configure Anaconda addons", ksdata.addons.execute, (storage, ksdata, u, payload)))
    configuration_queue.append(addon_config)

    # Initramfs generation
    generate_initramfs = TaskQueue("Initramfs generation", N_("Generating initramfs"))
    generate_initramfs.append(Task("Generate initramfs", payload.recreate_initrds))

    # This works around 2 problems, /boot on BTRFS and BTRFS installations where the initrd is
    # recreated after the first writeBootLoader call. This reruns it after the new initrd has
    # been created, fixing the kernel root and subvol args and adding the missing initrd entry.
    boot_on_btrfs = isinstance(storage.mountpoints.get("/"), BTRFSDevice)

    bootloader_proxy = STORAGE.get_proxy(BOOTLOADER)
    bootloader_enabled = bootloader_proxy.BootloaderMode != BOOTLOADER_DISABLED

    if isinstance(payload, LiveImagePayload) and boot_on_btrfs and bootloader_enabled:
        generate_initramfs.append(Task("Write BTRFS bootloader fix", write_boot_loader, (storage, payload)))

    # Invoking zipl should be the last thing done on an s390x installation (see #1652727).
    if arch.is_s390() and not conf.target.is_directory and bootloader_enabled:
        generate_initramfs.append(Task("Rerun zipl", lambda: util.execInSysroot("zipl", [])))

    configuration_queue.append(generate_initramfs)

    # join a realm (if required)
    if ksdata.realm.discovered:
        join_realm = TaskQueue("Realm join", N_("Joining realm: %s") % ksdata.realm.discovered)
        join_realm.append(Task("Join a realm", ksdata.realm.execute))
        configuration_queue.append(join_realm)

    post_scripts = TaskQueue("Post installation scripts", N_("Running post-installation scripts"))
    post_scripts.append(Task("Run post installation scripts", runPostScripts, (ksdata.scripts,)))
    configuration_queue.append(post_scripts)

    # setup kexec reboot if requested
    if flags.flags.kexec:
        kexec_setup = TaskQueue("Kexec setup", N_("Setting up kexec"))
        kexec_setup.append(Task("Setup kexec", setup_kexec))
        configuration_queue.append(kexec_setup)

    # write anaconda related configs & kickstarts
    write_configs = TaskQueue("Write configs and kickstarts", N_("Storing configuration files and kickstarts"))

    # Write the kickstart file to the installed system (or, copy the input
    # kickstart file over if one exists).
    if flags.flags.nosave_output_ks:
        # don't write the kickstart file to the installed system if this has
        # been disabled by the nosave option
        log.warning("Writing of the output kickstart to installed system has been disabled"
                    " by the nosave option.")
    else:
        # write the output kickstart file to the installed system
        write_configs.append(Task("Store kickstarts", _writeKS, (ksdata,)))

    # Write out the user interaction config file.
    #
    # But make sure it's not written out in the image and directory installation mode,
    # as that might result in spokes being inadvertently hidden when the actual installation
    # starts from the generate image or directory contents.
    if conf.target.is_image:
        log.info("Not writing out user interaction config file due to image install mode.")
    elif conf.target.is_directory:
        log.info("Not writing out user interaction config file due to directory install mode.")
    else:
        write_configs.append(Task("Store user interaction config", screen_access.sam.write_out_config_file))

    # only add write_configs to the main queue if we actually store some kickstarts/configs
    if write_configs.task_count:
        configuration_queue.append(write_configs)

    # notify progress tracking about the number of steps
    progress_init(configuration_queue.task_count)
    # log contents of the main task queue
    log.info(configuration_queue.summary)

    # log tasks and queues when they are started
    # - note that we are using generators to add the counter
    queue_counter = util.item_counter(configuration_queue.queue_count)
    task_started_counter = util.item_counter(configuration_queue.task_count)
    task_completed_counter = util.item_counter(configuration_queue.task_count)
    configuration_queue.queue_started.connect(lambda x: log.info("Queue started: %s (%s)", x.name, next(queue_counter)))
    configuration_queue.task_started.connect(lambda x: log.info("Task started: %s (%s)", x.name, next(task_started_counter)))
    configuration_queue.task_completed.connect(lambda x: log.debug("Task completed: %s (%s) (%1.1f s)",
                                                                   x.name, next(task_completed_counter),
                                                                   x.elapsed_time))
    # start the task queue
    configuration_queue.start()
    # done
    progress_complete()
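
The queue assembled above is a nested structure of named tasks with progress callbacks. The following is a minimal sketch of the Task/TaskQueue idea, using hypothetical stand-ins rather than the real pyanaconda queue classes:

class Task:
    """A named unit of work wrapping a callable and its arguments."""

    def __init__(self, name, func, args=()):
        self.name = name
        self._func = func
        self._args = args

    def run(self):
        self._func(*self._args)


class TaskQueue:
    """A named list of tasks that are run in order."""

    def __init__(self, name, status_message=""):
        self.name = name
        self.status_message = status_message
        self._tasks = []

    def append(self, task):
        self._tasks.append(task)

    @property
    def task_count(self):
        return len(self._tasks)

    def start(self):
        for task in self._tasks:
            print("Task started: %s" % task.name)
            task.run()


# Hypothetical usage echoing doConfiguration():
queue = TaskQueue("Configuration queue", "Configuring installed system")
queue.append(Task("Configure services", print, ("services configured",)))
queue.append(Task("Configure timezone", print, ("timezone configured",)))
queue.start()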
Exemple #51
0
    def on_back_clicked(self, button):
        # We can't exit early if it looks like nothing has changed because the
        # user might want to change settings presented in the dialogs shown from
        # within this method.

        # Do not enter this method multiple times if the user clicks the back
        # button repeatedly.
        if self._back_clicked:
            return
        else:
            self._back_clicked = True

        # make sure a snapshot of the unmodified on-disk storage model is created
        if not on_disk_storage.created:
            on_disk_storage.create_snapshot(self.storage)

        if self.autopart_missing_passphrase:
            self._setup_passphrase()
            NormalSpoke.on_back_clicked(self, button)
            return

        # No disks selected?  The user wants to back out of the storage spoke.
        if not self.selected_disks:
            NormalSpoke.on_back_clicked(self, button)
            return

        disk_selection_changed = False
        if self._last_selected_disks:
            disk_selection_changed = (self._last_selected_disks != set(self.selected_disks))

        # remember the disk selection for future decisions
        self._last_selected_disks = set(self.selected_disks)

        if disk_selection_changed:
            # Changing disk selection is really, really complicated and has
            # always been causing numerous hard bugs. Let's not play the hero
            # game and just revert everything and start over again.
            on_disk_storage.reset_to_snapshot(self.storage)
            self.disks = getDisks(self.storage.devicetree)
        else:
            # Remove all non-existing devices if autopart was active when we last
            # refreshed.
            if self._previous_autopart:
                self._previous_autopart = False
                self._remove_nonexistant_partitions()

        # hide disks as requested
        self._hide_disks()

        # make sure no containers were split up by the user's disk selection
        self.clear_info()

        # If there are disk selection errors, we don't let the user leave the
        # spoke, so these errors don't have to go to self.errors.
        self.disks_errors = checkDiskSelection(self.storage, self.selected_disks)
        if self.disks_errors:
            # The disk selection has to make sense before we can proceed.
            self.set_error(_("There was a problem with your disk selection. "
                             "Click here for details."))
            self._unhide_disks()
            self._back_clicked = False
            return

        if arch.is_s390():
            # check for unformatted DASDs and launch dasdfmt if any discovered
            rc = self._check_dasd_formats()
            if rc == DASD_FORMAT_NO_CHANGE:
                pass
            elif rc == DASD_FORMAT_REFRESH:
                # User hit OK on the dialog
                self.refresh()
            elif rc == DASD_FORMAT_RETURN_TO_HUB:
                # User clicked uri to return to hub.
                NormalSpoke.on_back_clicked(self, button)
                return
            else:
                # User either hit cancel on the dialog or closed it via escape,
                # there was no formatting done.
                self._back_clicked = False
                return

        # even if they're not doing autopart, setting autopart.encrypted
        # establishes a default of encrypting new devices
        self.encrypted = self._encrypted.get_active()

        # We might first need to ask about an encryption passphrase.
        if self.encrypted and not self._setup_passphrase():
            self._back_clicked = False
            return

        # At this point there are three possible states:
        # 1) user chose custom part => just send them to the CustomPart spoke
        # 2) user wants to reclaim some more space => run the ResizeDialog
        # 3) we are just asked to do autopart => check free space and see if we need
        #                                        user to do anything more
        self.autopart = not self._customPart.get_active()
        disks = [d for d in self.disks if d.name in self.selected_disks]
        dialog = None
        if not self.autopart:
            self.skipTo = "CustomPartitioningSpoke"
        elif self._reclaim.get_active():
            # HINT: change the logic of this 'if' statement if we are asked to
            # support "reclaim before custom partitioning"

            # respect disk selection and other choices in the ReclaimDialog
            self.apply()
            dialog = ResizeDialog(self.data, self.storage, self.payload)
            dialog.refresh(disks)
        else:
            dialog = self._check_space_and_get_dialog(disks)

        if dialog:
            # more dialogs may need to be run based on user choices, but we are
            # only interested in the final result
            rc = self._run_dialogs(disks, start_with=dialog)

            if rc == RESPONSE_OK:
                # nothing special needed
                pass
            elif rc == RESPONSE_CANCEL:
                # A cancel button was clicked on one of the dialogs.  Stay on this
                # spoke.  Generally, this is because the user wants to add more disks.
                self._back_clicked = False
                return
            elif rc == RESPONSE_MODIFY_SW:
                # The "Fedora software selection" link was clicked on one of the
                # dialogs.  Send the user to the software spoke.
                self.skipTo = "SoftwareSelectionSpoke"
            elif rc == RESPONSE_QUIT:
                # Not enough space, and the user can't do anything about it so
                # they chose to quit.
                raise SystemExit("user-selected exit")
            else:
                # I don't know how we'd get here, but might as well have a
                # catch-all.  Just stay on this spoke.
                self._back_clicked = False
                return

        if self.autopart:
            refreshAutoSwapSize(self.storage)
        self.applyOnSkip = True
        NormalSpoke.on_back_clicked(self, button)
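
The dialog handling above funnels every follow-up dialog through _run_dialogs() and only branches on the final response code. A rough, hypothetical sketch of such a chaining helper (the real spoke method drives GTK dialogs and is considerably more involved):

RESPONSE_OK, RESPONSE_CANCEL, RESPONSE_MODIFY_SW, RESPONSE_QUIT = range(4)

def run_dialogs(disks, start_with):
    """Run a chain of dialogs and return the response of the last one shown.

    In this sketch 'disks' is only carried along; the real helper passes it to
    each dialog's refresh().
    """
    rc = RESPONSE_CANCEL
    dialog = start_with
    while dialog is not None:
        rc = dialog.run()
        # keep going only while the user accepts and a follow-up dialog exists
        dialog = getattr(dialog, "next_dialog", None) if rc == RESPONSE_OK else None
    return rc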
Exemple #52
0
def sanity_check(storage, min_ram=isys.MIN_RAM):
    """
    Run a series of tests to verify the storage configuration.

    This function is called at the end of partitioning so that
    we can make sure you don't have anything silly (like no /,
    a really small /, etc).

    :param storage: an instance of the :class:`blivet.Blivet` class to check
    :param min_ram: minimum RAM (in MiB) needed for the installation with swap
                    space available
    :rtype: a list of SanityExceptions
    :return: a list of accumulated errors and warnings

    """

    exns = []

    checkSizes = [('/usr', Size("250 MiB")), ('/tmp', Size("50 MiB")), ('/var', Size("384 MiB")),
                  ('/home', Size("100 MiB")), ('/boot', Size("200 MiB"))]
    mustbeonlinuxfs = ['/', '/var', '/tmp', '/usr', '/home', '/usr/share', '/usr/lib']
    mustbeonroot = ['/bin', '/dev', '/sbin', '/etc', '/lib', '/root', '/mnt', 'lost+found', '/proc']

    filesystems = storage.mountpoints
    root = storage.fsset.root_device
    swaps = storage.fsset.swap_devices

    if root:
        if root.size < Size("250 MiB"):
            exns.append(
               SanityWarning(_("Your root partition is less than 250 "
                              "megabytes which is usually too small to "
                              "install %s.") % (productName,)))
    else:
        exns.append(
           SanityError(_("You have not defined a root partition (/), "
                        "which is required for installation of %s "
                        "to continue.") % (productName,)))

    # Prevent users from installing on s390x with (a) no /boot volume, (b) the
    # root volume on LVM, and (c) the root volume not restricted to a single
    # PV
    # NOTE: There is not really a way for users to create a / volume
    # restricted to a single PV.  The backend support is there, but there are
    # no UI hook-ups to drive that functionality, but I do not personally
    # care.  --dcantrell
    if arch.is_s390() and '/boot' not in storage.mountpoints and root:
        if root.type == 'lvmlv' and not root.single_pv:
            exns.append(
               SanityError(_("This platform requires /boot on a dedicated "
                            "partition or logical volume.  If you do not "
                            "want a /boot volume, you must place / on a "
                            "dedicated non-LVM partition.")))

    # FIXME: put a check here for enough space on the filesystems. maybe?

    for (mount, size) in checkSizes:
        if mount in filesystems and filesystems[mount].size < size:
            exns.append(
               SanityWarning(_("Your %(mount)s partition is less than "
                              "%(size)s which is lower than recommended "
                              "for a normal %(productName)s install.")
                            % {'mount': mount, 'size': size,
                               'productName': productName}))

    # storage.mountpoints is a property that returns a new dict each time, so
    # iterating over it is thread-safe.
    for (mount, device) in filesystems.items():
        problem = filesystems[mount].check_size()
        if problem < 0:
            exns.append(
               SanityError(_("Your %(mount)s partition is too small for %(format)s formatting "
                            "(allowable size is %(minSize)s to %(maxSize)s)")
                          % {"mount": mount, "format": device.format.name,
                             "minSize": device.min_size, "maxSize": device.max_size}))
        elif problem > 0:
            exns.append(
               SanityError(_("Your %(mount)s partition is too large for %(format)s formatting "
                            "(allowable size is %(minSize)s to %(maxSize)s)")
                          % {"mount":mount, "format": device.format.name,
                             "minSize": device.min_size, "maxSize": device.max_size}))

    if storage.bootloader and not storage.bootloader.skip_bootloader:
        stage1 = storage.bootloader.stage1_device
        if not stage1:
            exns.append(
               SanityError(_("No valid boot loader target device found. "
                            "See below for details.")))
            pe = _platform.stage1_missing_error
            if pe:
                exns.append(SanityError(_(pe)))
        else:
            storage.bootloader.is_valid_stage1_device(stage1)
            exns.extend(SanityError(msg) for msg in storage.bootloader.errors)
            exns.extend(SanityWarning(msg) for msg in storage.bootloader.warnings)

        stage2 = storage.bootloader.stage2_device
        if stage1 and not stage2:
            exns.append(SanityError(_("You have not created a bootable partition.")))
        else:
            storage.bootloader.is_valid_stage2_device(stage2)
            exns.extend(SanityError(msg) for msg in storage.bootloader.errors)
            exns.extend(SanityWarning(msg) for msg in storage.bootloader.warnings)
            if not storage.bootloader.check():
                exns.extend(SanityError(msg) for msg in storage.bootloader.errors)

        #
        # check that GPT boot disk on BIOS system has a BIOS boot partition
        #
        if _platform.weight(fstype="biosboot") and \
           stage1 and stage1.is_disk and \
           getattr(stage1.format, "labelType", None) == "gpt":
            missing = True
            for part in [p for p in storage.partitions if p.disk == stage1]:
                if part.format.type == "biosboot":
                    missing = False
                    break

            if missing:
                exns.append(
                   SanityError(_("Your BIOS-based system needs a special "
                                "partition to boot from a GPT disk label. "
                                "To continue, please create a 1MiB "
                                "'biosboot' type partition.")))

    if not swaps:
        installed = util.total_memory()
        required = Size("%s MiB" % (min_ram + isys.NO_SWAP_EXTRA_RAM))

        if installed < required:
            exns.append(
               SanityError(_("You have not specified a swap partition.  "
                            "%(requiredMem)s of memory is required to continue installation "
                            "without a swap partition, but you only have %(installedMem)s.")
                          % {"requiredMem": required,
                             "installedMem": installed}))
        else:
            exns.append(
               SanityWarning(_("You have not specified a swap partition.  "
                              "Although not strictly required in all cases, "
                              "it will significantly improve performance "
                              "for most installations.")))
    no_uuid = [s for s in swaps if s.format.exists and not s.format.uuid]
    if no_uuid:
        exns.append(
           SanityWarning(_("At least one of your swap devices does not have "
                          "a UUID, which is common in swap space created "
                          "using older versions of mkswap. These devices "
                          "will be referred to by device path in "
                          "/etc/fstab, which is not ideal since device "
                          "paths can change under a variety of "
                          "circumstances. ")))

    for (mountpoint, dev) in filesystems.items():
        if mountpoint in mustbeonroot:
            exns.append(
               SanityError(_("This mount point is invalid.  The %s directory must "
                            "be on the / file system.") % mountpoint))

        if mountpoint in mustbeonlinuxfs and (not dev.format.mountable or not dev.format.linux_native):
            exns.append(
               SanityError(_("The mount point %s must be on a linux file system.") % mountpoint))

    if storage.root_device and storage.root_device.format.exists:
        e = storage.must_format(storage.root_device)
        if e:
            exns.append(SanityError(e))

    exns += verify_LUKS_devices_have_key(storage)

    exns += check_mounted_partitions(storage)

    return exns
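
A caller is expected to separate the hard errors from the warnings in the returned list. Here is a minimal sketch of that caller side, assuming the SanityError/SanityWarning classes used above and a module-level log object:

def report_sanity_results(storage):
    """Log all findings; only hard errors block the installation."""
    exns = sanity_check(storage)
    errors = [e for e in exns if isinstance(e, SanityError)]
    warnings = [w for w in exns if isinstance(w, SanityWarning)]

    for warning in warnings:
        log.warning(str(warning))
    for error in errors:
        log.error(str(error))

    return not errors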
Exemple #53
0
    def should_run(cls, environment, data):
        # run only in the installer on s390(x) machines
        return flags.debug or (environment == ANACONDA_ENVIRON and arch.is_s390())
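
A rough illustration of how a should_run() predicate like this can gate which spokes get collected (a simplified sketch; the real collection logic lives elsewhere in the Anaconda UI code):

def collect_runnable_spokes(spoke_classes, environment, data):
    """Keep only the spoke classes whose should_run() check passes."""
    return [cls for cls in spoke_classes if cls.should_run(environment, data)]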
Exemple #54
0
    if product.isFinal:
        print("anaconda %s started." % verdesc)
    else:
        print("anaconda %s (pre-release) started." % verdesc)

    # we are past the --version and --help shortcut so we can import Blivet
    # now without slowing down anything critical

    # pylint: disable=import-error
    from blivet import arch

    if not opts.images and not opts.dirinstall:
        print(logs_note)
        # no fancy stuff like TTYs on an s390...
        if not arch.is_s390():
            if "TMUX" in os.environ and os.environ.get("TERM") == "screen":
                print(shell_and_tmux_note)
            else:
                print(shell_only_note)  # TMUX is not running
        # ...but there is apparently TMUX during the manual installation on s390!
        elif not opts.ksfile:
            print(tmux_only_note)  # but not during kickstart installation
        # no need to tell users how to switch to text mode
        # if already in text mode
        if opts.display_mode == 'g':
            print(text_mode_note)
        print(separate_attachements_note)

    from pyanaconda.anaconda import Anaconda
    anaconda = Anaconda()
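
The tmux hint above boils down to an environment check. A tiny, hypothetical helper expressing the same test:

import os

def running_under_tmux():
    """Mirror the check above: TMUX is set and TERM reports 'screen'."""
    return "TMUX" in os.environ and os.environ.get("TERM") == "screen"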
Exemple #55
0
def setupDisplay(anaconda, options, addons=None):
    from pyanaconda.ui.tui.simpleline import App
    from pyanaconda.ui.tui.spokes.askvnc import AskVNCSpoke
    from pykickstart.constants import DISPLAY_MODE_TEXT
    from pyanaconda.nm import nm_is_connected, nm_is_connecting
    from blivet import arch

    graphical_failed = 0
    vncS = vnc.VncServer()          # The vnc Server object.
    vncS.anaconda = anaconda

    anaconda.displayMode = options.display_mode
    anaconda.isHeadless = arch.is_s390()

    if options.vnc:
        flags.usevnc = True
        anaconda.displayMode = 'g'
        vncS.password = options.vncpassword

        # Only consider vncconnect when vnc is a param
        if options.vncconnect:
            cargs = options.vncconnect.split(":")
            vncS.vncconnecthost = cargs[0]
            if len(cargs) > 1 and len(cargs[1]) > 0:
                vncS.vncconnectport = cargs[1]

    if options.xdriver:
        anaconda.xdriver = options.xdriver
        anaconda.writeXdriver(root="/")

    if flags.rescue_mode:
        return

    if anaconda.ksdata.vnc.enabled:
        flags.usevnc = True
        anaconda.displayMode = 'g'

        if vncS.password == "":
            vncS.password = anaconda.ksdata.vnc.password

        if vncS.vncconnecthost == "":
            vncS.vncconnecthost = anaconda.ksdata.vnc.host

        if vncS.vncconnectport == "":
            vncS.vncconnectport = anaconda.ksdata.vnc.port

    if anaconda.displayMode == "g":
        import pkgutil
        import pyanaconda.ui

        mods = (tup[1] for tup in pkgutil.iter_modules(pyanaconda.ui.__path__, "pyanaconda.ui."))
        if "pyanaconda.ui.gui" not in mods:
            stdoutLog.warning("Graphical user interface not available, falling back to text mode")
            anaconda.displayMode = "t"
            flags.usevnc = False
            flags.vncquestion = False

    # disable VNC over text question when not enough memory is available
    if blivet.util.total_memory() < isys.MIN_GUI_RAM:
        stdoutLog.warning("Not asking for VNC because current memory (%d) < MIN_GUI_RAM (%d)", blivet.util.total_memory(), isys.MIN_GUI_RAM)
        flags.vncquestion = False

    # disable VNC question if text mode is requested and this is a ks install
    if anaconda.displayMode == 't' and flags.automatedInstall:
        stdoutLog.warning("Not asking for VNC because of an automated install")
        flags.vncquestion = False

    # disable VNC question if we were explicitly asked for text in kickstart
    if anaconda.ksdata.displaymode.displayMode == DISPLAY_MODE_TEXT:
        stdoutLog.warning("Not asking for VNC because text mode was explicitly asked for in kickstart")
        flags.vncquestion = False

    # disable VNC question if we don't have network
    if not nm_is_connecting() and not nm_is_connected():
        stdoutLog.warning("Not asking for VNC because we don't have a network")
        flags.vncquestion = False

    # disable VNC question if we don't have Xvnc
    if not os.access('/usr/bin/Xvnc', os.X_OK):
        stdoutLog.warning("Not asking for VNC because we don't have Xvnc")
        flags.vncquestion = False

    # Should we try to start Xorg?
    want_x = anaconda.displayMode == 'g' and \
             not (flags.preexisting_x11 or flags.usevnc)

    # X on a headless (e.g. s390) system? Nonsense!
    if want_x and anaconda.isHeadless:
        stdoutLog.warning(_("DISPLAY variable not set. Starting text mode."))
        anaconda.displayMode = 't'
        graphical_failed = 1
        time.sleep(2)
        want_x = False

    # Is Xorg actually available?
    if want_x and not os.access("/usr/bin/Xorg", os.X_OK):
        stdoutLog.warning(_("Graphical installation is not available. "
                            "Starting text mode."))
        time.sleep(2)
        anaconda.displayMode = 't'
        want_x = False

    if anaconda.displayMode == 't' and flags.vncquestion:
        # we prefer VNC over text mode, so ask about that
        message = _("Text mode provides a limited set of installation "
                    "options. It does not offer custom partitioning for "
                    "full control over the disk layout. Would you like "
                    "to use VNC mode instead?")

        app = App("VNC Question")
        spoke = AskVNCSpoke(app, anaconda.ksdata, message=message)
        app.schedule_screen(spoke)
        app.run()

        if anaconda.ksdata.vnc.enabled:
            anaconda.displayMode = 'g'
            flags.usevnc = True
            vncS.password = anaconda.ksdata.vnc.password
        else:
            # user has explicitly specified text mode
            flags.vncquestion = False

    log.info("Display mode = %s", anaconda.displayMode)
    check_memory(anaconda, options)

    # check_memory may have changed the display mode
    want_x = want_x and (anaconda.displayMode == "g")
    if want_x:
        try:
            startX11()
            doStartupX11Actions()
        except (OSError, RuntimeError) as e:
            log.warning("X startup failed: %s", e)
            stdoutLog.warning("X startup failed, falling back to text mode")
            anaconda.displayMode = 't'
            graphical_failed = 1
            time.sleep(2)

        if not graphical_failed:
            doExtraX11Actions(options.runres)

    if anaconda.displayMode == 't' and graphical_failed and \
         flags.vncquestion and not anaconda.ksdata.vnc.enabled:
        app = App("VNC Question")
        spoke = AskVNCSpoke(app, anaconda.ksdata)
        app.schedule_screen(spoke)
        app.run()

        if anaconda.ksdata.vnc.enabled:
            anaconda.displayMode = 'g'
            flags.usevnc = True
            vncS.password = anaconda.ksdata.vnc.password

    # if they want us to use VNC do that now
    if anaconda.displayMode == 'g' and flags.usevnc:
        vncS.startServer()
        doStartupX11Actions()

    # with X running we can initialize the UI interface
    anaconda.initInterface(addons)

    anaconda.instClass.configure(anaconda)

    # report if starting the GUI failed
    anaconda.gui_startup_failed = bool(graphical_failed)
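
The fallback logic in setupDisplay() amounts to one decision: start Xorg only when graphical mode is requested, no display server or VNC is already in play, the machine is not headless, and Xorg is installed. An illustrative, hypothetical condensation of those checks:

def decide_want_x(display_mode, is_headless, preexisting_x11=False,
                  usevnc=False, xorg_available=True):
    """Condense the fallback checks above into a single predicate (sketch only)."""
    if display_mode != 'g':
        return False
    if preexisting_x11 or usevnc:
        return False
    if is_headless:           # e.g. s390 has no display hardware
        return False
    return xorg_available     # /usr/bin/Xorg must exist and be executable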