def run(self):
    """ Calculates download package list and then calls run_format and
        run_install. Takes care of the exceptions, too.

    Raises nothing to the caller: every expected failure is logged and
    forwarded to the UI through queue_fatal_event().
    """
    try:
        # Before formatting, let's try to calculate package download list
        # this way, if something fails (a missing package, mostly) we have
        # not formatted anything yet.
        self.create_metalinks_list()

        self.queue_event('info', _("Getting your disk(s) ready for Antergos..."))
        with misc.raised_privileges() as __:
            if self.settings.get('is_iso'):
                self.install_screen.run_format()
            # Leave a marker so that a retry after formatting forces the
            # user to reboot first.
            # BUG FIX: the original wrote this marker file twice in a row
            # (the whole with-open block was duplicated verbatim); the
            # redundant second write has been removed.
            path = "/tmp/.cnchi_partitioning_completed"
            with open(path, 'w') as part_file:
                part_file.write("# File created by Cnchi to force\n")
                part_file.write("# users to reboot before retry\n")
                part_file.write("# formatting their hard disk(s)\n")

        self.queue_event('info', _("Installation will start now!"))
        with misc.raised_privileges() as __:
            if self.settings.get('is_iso'):
                self.install_screen.run_install(
                    self.pkg.packages, self.down.metalinks)
    except subprocess.CalledProcessError as process_error:
        txt = "Error running command {0}: {1}".format(
            process_error.cmd, process_error.output)
        logging.error(txt)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        trace = traceback.format_exception(exc_type, exc_value, exc_traceback)
        for line in trace:
            logging.error(line.rstrip())
        # Localized message for the UI, plain one for the log above
        txt = _("Error running command {0}: {1}").format(
            process_error.cmd, process_error.output)
        self.queue_fatal_event(txt)
    except (misc.InstallError, pyalpm.error, KeyboardInterrupt, TypeError,
            AttributeError, OSError, IOError) as install_error:
        logging.error(install_error)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        trace = traceback.format_exception(exc_type, exc_value, exc_traceback)
        for line in trace:
            logging.error(line.rstrip())
        self.queue_fatal_event(install_error)
def run_rankmirrors(self):
    """ Sort the Antergos mirrorlist by speed using rankmirrors (if the
        tool is installed), then sync filesystem caches. """
    if os.path.exists("/usr/bin/rankmirrors"):
        # Uncomment Antergos mirrors and comment out auto selection so
        # rankmirrors can find the best mirror.
        self.uncomment_antergos_mirrors()
        with misc.raised_privileges():
            try:
                # Store rankmirrors output in a temporary file
                with tempfile.TemporaryFile(mode='w+t') as temp_file:
                    cmd = [
                        'rankmirrors', '-n', '0', '-r', 'antergos',
                        self.antergos_mirrorlist]
                    # BUG FIX: subprocess.call() never raises
                    # CalledProcessError, so the handler below was dead
                    # code; check_call() makes a rankmirrors failure reach
                    # it and skip overwriting the mirrorlist with garbage.
                    subprocess.check_call(cmd, stdout=temp_file)
                    temp_file.seek(0)
                    # Copy new mirrorlist to the old one
                    with open(self.antergos_mirrorlist, 'w') as antergos_mirrorlist_file:
                        antergos_mirrorlist_file.write(temp_file.read())
            except subprocess.CalledProcessError as why:
                logging.debug(
                    'Cannot run rankmirrors on Antergos mirrorlist: %s',
                    why)
    self.sync()
def uncomment_antergos_mirrors(self):
    """ Uncomment Antergos mirrors and comment out auto selection so
        rankmirrors can find the best mirror. """
    autoselect = "http://mirrors.antergos.com/$repo/$arch"
    if os.path.exists(self.antergos_mirrorlist):
        with open(self.antergos_mirrorlist) as mirrors:
            lines = [x.strip() for x in mirrors.readlines()]
        # Idiom fix: enumerate instead of an index-based range(len(...))
        # loop; each line goes through the same transforms in the same
        # order as before.
        for i, line in enumerate(lines):
            if line.startswith("Server") and autoselect in line:
                # Comment out auto selection
                line = "#" + line
            elif line.startswith("#Server") and autoselect not in line:
                # Uncomment Antergos mirror
                line = line.lstrip("#")
            # sourceforge server does not get updated as often as necessary
            if "sourceforge" in line:
                line = "#" + line
            lines[i] = line
        with misc.raised_privileges():
            # Write new one
            with open(self.antergos_mirrorlist, 'w') as mirrors:
                mirrors.write("\n".join(lines) + "\n")
    self.sync()
def uncomment_antergos_mirrors(self):
    """ Uncomment Antergos mirrors and comment out auto selection so
        rankmirrors can find the best mirror.

    Only the FIRST active auto-selection entry and the FIRST active
    sourceforge entry are commented out (the two boolean flags below
    guarantee that); every other commented-out Server line is re-enabled.
    """
    autoselect = "http://mirrors.antergos.com/$repo/$arch"
    # Flags: each "comment out" action must happen at most once
    autoselect_on = True
    autoselect_sf = True
    if os.path.exists(self.antergos_mirrorlist):
        with open(self.antergos_mirrorlist) as mirrors:
            lines = [x.strip() for x in mirrors.readlines()]
        for i in range(len(lines)):
            srv_comment = lines[i].startswith("#Server")
            srv = lines[i].startswith("Server")
            if autoselect_on and srv and autoselect in lines[i]:
                # Comment out auto selection
                lines[i] = "#" + lines[i]
                autoselect_on = False
            elif autoselect_sf and srv and 'sourceforge' in lines[i]:
                # Comment out sourceforge auto selection url
                # (that mirror does not get updated often enough)
                lines[i] = "#" + lines[i]
                autoselect_sf = False
            elif srv_comment and autoselect not in lines[
                    i] and 'sourceforge' not in lines[i]:
                # Uncomment Antergos mirror
                lines[i] = lines[i].lstrip("#")
        with misc.raised_privileges() as __:
            # Write new one
            with open(self.antergos_mirrorlist, 'w') as mirrors:
                mirrors.write("\n".join(lines) + "\n")
    self.sync()
def uncomment_antergos_mirrors(self):
    """ Uncomment Antergos mirrors and comment out auto selection so
        rankmirrors can find the best mirror. """
    autoselect = "http://mirrors.antergos.com/$repo/$arch"
    # Each "disable" action below must fire at most once
    need_disable_autoselect = True
    need_disable_sourceforge = True
    if os.path.exists(self.antergos_mirrorlist):
        with open(self.antergos_mirrorlist) as mirrorlist_file:
            entries = [entry.strip() for entry in mirrorlist_file.readlines()]
        for idx, entry in enumerate(entries):
            is_active = entry.startswith("Server")
            is_disabled = entry.startswith("#Server")
            if need_disable_autoselect and is_active and autoselect in entry:
                # Disable the auto-selection url
                entries[idx] = "#" + entry
                need_disable_autoselect = False
            elif need_disable_sourceforge and is_active and 'sourceforge' in entry:
                # Disable the sourceforge url (it lags behind the others)
                entries[idx] = "#" + entry
                need_disable_sourceforge = False
            elif (is_disabled and autoselect not in entry and
                    'sourceforge' not in entry):
                # Re-enable a regular Antergos mirror
                entries[idx] = entry.lstrip("#")
        with misc.raised_privileges():
            # Persist the modified mirrorlist
            with open(self.antergos_mirrorlist, 'w') as mirrorlist_file:
                mirrorlist_file.write("\n".join(entries) + "\n")
    self.sync()
def fill_device_list(self):
    """ Fill the partition list with all the data. """
    # Our data model lives in 'device_list_store'
    if self.device_list_store is not None:
        self.device_list_store.clear()
    self.device_list_store = Gtk.TreeStore(
        bool, bool, bool, str, int, str, str)
    with misc.raised_privileges() as __:
        devices = parted.getAllDevices()
    self.get_ids()
    for dev in devices:
        # Skip cdrom, raid, lvm volumes or encryptfs
        if dev.path.startswith("/dev/sr") or dev.path.startswith("/dev/mapper"):
            continue
        # Drives report decimal sizes (kilo = 1000)
        size_in_gigabytes = int((dev.length * dev.sectorSize) / 1000000000)
        # Use check | Disk (sda) | Size(GB) | Name (device name)
        if dev.path.startswith("/dev/"):
            path = dev.path[len("/dev/"):]
        else:
            path = dev.path
        disk_id = self.ids.get(path, "")
        self.device_list_store.append(
            None,
            [False, True, True, path, size_in_gigabytes, dev.model, disk_id])
    self.device_list.set_model(self.device_list_store)
def fill_device_list(self): """ Fill the partition list with all the data. """ # We will store our data model in 'device_list_store' if self.device_list_store is not None: self.device_list_store.clear() self.device_list_store = Gtk.TreeStore(bool, bool, bool, str, int, str, str) with misc.raised_privileges(): devices = parted.getAllDevices() self.get_ids() for dev in devices: # Skip cdrom, raid, lvm volumes or encryptfs if not dev.path.startswith("/dev/sr") and not dev.path.startswith("/dev/mapper"): size_in_gigabytes = int((dev.length * dev.sectorSize) / 1000000000) # Use check | Disk (sda) | Size(GB) | Name (device name) if dev.path.startswith("/dev/"): path = dev.path[len("/dev/") :] else: path = dev.path disk_id = self.ids.get(path, "") row = [False, True, True, path, size_in_gigabytes, dev.model, disk_id] self.device_list_store.append(None, row) self.device_list.set_model(self.device_list_store)
def populate_devices(self):
    """ Fill list with devices """
    # Enumerating block devices requires elevated privileges
    with misc.raised_privileges() as __:
        device_list = parted.getAllDevices()

    # Reset both combo boxes and their path lookups
    self.device_store.remove_all()
    self.devices = {}
    self.bootloader_device_entry.remove_all()
    self.bootloader_devices.clear()

    for dev in device_list:
        # avoid cdrom and any raid, lvm volumes or encryptfs
        if dev.path.startswith("/dev/sr") or dev.path.startswith("/dev/mapper"):
            continue
        # hard drives measure themselves assuming kilo=1000, mega=1mil, etc
        size_in_gigabytes = int((dev.length * dev.sectorSize) / 1000000000)
        line = '{0} [{1} GB] ({2})'.format(dev.model, size_in_gigabytes, dev.path)
        self.device_store.append_text(line)
        self.devices[line] = dev.path
        self.bootloader_device_entry.append_text(line)
        self.bootloader_devices[line] = dev.path
        logging.debug(line)

    self.select_first_combobox_item(self.device_store)
    self.select_first_combobox_item(self.bootloader_device_entry)
def populate_devices(self):
    """ Fill list with devices.

    Refreshes both the install-device and bootloader-device combo boxes
    from parted's device list, keeping a display-line -> device-path map
    for each, then pre-selects the first entry of each combo.
    """
    # parted needs elevated privileges to enumerate block devices
    with misc.raised_privileges() as __:
        device_list = parted.getAllDevices()

    self.device_store.remove_all()
    self.devices = {}

    self.bootloader_device_entry.remove_all()
    self.bootloader_devices.clear()

    for dev in device_list:
        # avoid cdrom and any raid, lvm volumes or encryptfs
        if not dev.path.startswith("/dev/sr") and \
                not dev.path.startswith("/dev/mapper"):
            # hard drives measure themselves assuming kilo=1000, mega=1mil, etc
            size_in_gigabytes = int(
                (dev.length * dev.sectorSize) / 1000000000)
            # Display line, e.g. "WDC WD10EZEX [1000 GB] (/dev/sda)"
            line = '{0} [{1} GB] ({2})'
            line = line.format(dev.model, size_in_gigabytes, dev.path)
            self.device_store.append_text(line)
            self.devices[line] = dev.path
            self.bootloader_device_entry.append_text(line)
            self.bootloader_devices[line] = dev.path
            logging.debug(line)

    self.select_first_combobox_item(self.device_store)
    self.select_first_combobox_item(self.bootloader_device_entry)
def update_cnchi():
    """ Runs updater function to update cnchi to the latest version if necessary """
    upd = updater.Updater(
        force_update=cmd_line.update,
        local_cnchi_version=info.CNCHI_VERSION)

    if upd.update():
        logging.info("Program updated! Restarting...")
        misc.remove_temp_files()
        if cmd_line.update:
            # Remove -u and --update options from new call
            new_argv = [arg for arg in sys.argv
                        if arg not in ("-u", "--update")]
        else:
            new_argv = sys.argv
        # Do not try to update again now
        new_argv.append("--disable-update")
        # Run another instance of Cnchi (which will be the new version)
        with misc.raised_privileges():
            os.execl(sys.executable, *([sys.executable] + new_argv))
        sys.exit(0)
def sync():
    """ Synchronize cached writes to persistent storage """
    # sync(1) must run with elevated privileges
    with misc.raised_privileges():
        try:
            subprocess.check_call(['sync'])
        except subprocess.CalledProcessError as err:
            # Best effort only: log and carry on
            logging.warning(
                "Can't synchronize cached writes to persistent storage: %s",
                err)
def load_zfs():
    """ Load the ZFS kernel module with modprobe.

    Returns True on success, False otherwise. """
    try:
        with misc.raised_privileges():
            subprocess.check_output(
                ["modprobe", "zfs"], stderr=subprocess.STDOUT)
            logging.debug("ZFS kernel module loaded successfully.")
    except subprocess.CalledProcessError as err:
        logging.debug("Can't load ZFS kernel module: %s", err.output.decode())
        return False
    return True
def unzip_and_copy(self, zip_path):
    """ Unzip (decompress) a zip file using zipfile standard module
    and copy cnchi's files to their destinations.

    Two passes over the archive: first extract everything to /tmp and
    verify md5 signatures against ``self.md5s``; only if every signature
    matches are the files copied over /usr/share/cnchi.
    NOTE(review): destination paths are derived by splitting on the
    literal "/tmp/Cnchi-master/" — presumably the zip is a GitHub
    "master" snapshot; members outside that folder would raise
    IndexError. TODO confirm.
    """
    import zipfile
    dst_dir = "/tmp"
    # First check all md5 signatures
    all_md5_ok = True
    with zipfile.ZipFile(zip_path) as zip_file:
        # Check md5 sums
        for member in zip_file.infolist():
            zip_file.extract(member, dst_dir)
            full_path = os.path.join(dst_dir, member.filename)
            # Map /tmp/Cnchi-master/<rel> -> /usr/share/cnchi/<rel>
            dst_full_path = os.path.join(
                "/usr/share/cnchi",
                full_path.split("/tmp/Cnchi-master/")[1])
            if os.path.isfile(dst_full_path):
                if dst_full_path in self.md5s:
                    # update.info itself is exempt from the md5 check
                    if ("update.info" not in dst_full_path and
                            self.md5s[dst_full_path] != get_md5_from_file(full_path)):
                        logging.warning(
                            _("Wrong md5 (%s). Bad download or wrong file, "
                              "Cnchi won't update itself"),
                            member.filename)
                        all_md5_ok = False
                        break
                else:
                    # Unsigned file: warn but do not abort the update
                    logging.warning(
                        _("File %s is not in md5 signatures list"),
                        member.filename)

        if all_md5_ok:
            # All md5 sums where ok. Let's copy all files
            for member in zip_file.infolist():
                full_path = os.path.join(dst_dir, member.filename)
                dst_full_path = os.path.join(
                    "/usr/share/cnchi",
                    full_path.split("/tmp/Cnchi-master/")[1])
                if os.path.isfile(dst_full_path):
                    try:
                        # Overwriting /usr/share/cnchi needs privileges
                        with misc.raised_privileges():
                            logging.debug(
                                _("Copying %s to %s..."),
                                full_path, dst_full_path)
                            shutil.copyfile(full_path, dst_full_path)
                    except FileNotFoundError as file_error:
                        logging.error(
                            _("Can't copy %s to %s"),
                            full_path, dst_full_path)
                        logging.error(file_error)
def load_zfs():
    """ Load the ZFS kernel module via modprobe.

    Returns:
        bool: True if the module loaded (or was already loaded),
        False if modprobe failed.
    """
    cmd = ["modprobe", "zfs"]
    try:
        # modprobe requires elevated privileges; capture stderr too so a
        # failure message ends up in err.output
        with misc.raised_privileges():
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            logging.debug("ZFS kernel module loaded successfully.")
    except subprocess.CalledProcessError as err:
        logging.debug(
            "Can't load ZFS kernel module: %s", err.output.decode())
        return False
    return True
def unzip_and_copy(self, zip_path):
    """ Unzip (decompress) a zip file using zipfile standard module
    and copy cnchi's files to their destinations """
    import zipfile

    extract_dir = "/tmp"
    # First check all md5 signatures
    signatures_ok = True
    with zipfile.ZipFile(zip_path) as bundle:
        # Pass 1: extract and verify md5 sums
        for entry in bundle.infolist():
            bundle.extract(entry, extract_dir)
            src = os.path.join(extract_dir, entry.filename)
            dst = os.path.join(
                "/usr/share/cnchi",
                src.split("/tmp/Cnchi-master/")[1])
            if not os.path.isfile(dst):
                continue
            if dst not in self.md5s:
                # Unsigned file: warn, but keep going
                logging.warning(
                    _("File %s is not in md5 signatures list"),
                    entry.filename)
                continue
            if ("update.info" not in dst and
                    self.md5s[dst] != get_md5_from_file(src)):
                logging.warning(
                    _("Wrong md5 (%s). Bad download or wrong file, "
                      "Cnchi won't update itself"),
                    entry.filename)
                signatures_ok = False
                break

        if signatures_ok:
            # All md5 sums were ok. Pass 2: copy files into place
            for entry in bundle.infolist():
                src = os.path.join(extract_dir, entry.filename)
                dst = os.path.join(
                    "/usr/share/cnchi",
                    src.split("/tmp/Cnchi-master/")[1])
                if os.path.isfile(dst):
                    try:
                        with misc.raised_privileges():
                            logging.debug(_("Copying %s to %s..."), src, dst)
                            shutil.copyfile(src, dst)
                    except FileNotFoundError as file_error:
                        logging.error(_("Can't copy %s to %s"), src, dst)
                        logging.error(file_error)
def filter_and_sort_arch_mirrorlist(self):
    """ Rank the Arch mirrors by measured speed and rewrite the Arch
        mirrorlist in that order. """
    header = '# Arch Linux mirrorlist generated by Cnchi #\n'
    ranked = self.sort_mirrors_by_speed(mirrors=self.get_mirror_stats())

    body_lines = []
    for mirror in ranked:
        self.arch_mirrorlist_ranked.append(mirror['url'])
        body_lines.append(
            "Server = {0}{1}/os/{2}\n".format(mirror['url'], '$repo', '$arch'))
    output = header + "".join(body_lines)

    # Write modified Arch mirrorlist
    with misc.raised_privileges() as __:
        with open(self.arch_mirrorlist, 'w') as arch_mirrors:
            arch_mirrors.write(output)
    self.sync()
def load_codes(self):
    """ Load keyboard codes for the current layout/variant.

    Runs ckbcomp to compute the keymap and fills ``self.codes`` with one
    (plain, shift, ctrl, alt) tuple per "keycode" line. Does nothing when
    no layout is selected; on ckbcomp failure it logs and returns, leaving
    ``self.codes`` untouched.
    """
    if self.layout is None:
        return

    cmd = [
        "/usr/share/cnchi/scripts/ckbcomp",
        "-model", "pc106",
        "-layout", self.layout]

    if self.variant:
        cmd.extend(["-variant", self.variant])

    cmd.append("-compact")

    try:
        # IDIOM FIX: the original bound the context value to an unused
        # local ("as privileged"); the value is never needed.
        with raised_privileges():
            cfile = call(cmd).split('\n')
    except subprocess.CalledProcessError as process_error:
        logging.error(
            "Error running command %s: %s",
            process_error.cmd, process_error)
        return

    # Clear current codes
    del self.codes[:]

    for line in cfile:
        # Only "keycode ..." lines describe keys
        # (startswith replaces the original slice compare line[:7])
        if not line.startswith("keycode"):
            continue
        codes = line.split('=')[1].strip().split(' ')
        plain = unicode_to_string(codes[0])
        shift = unicode_to_string(codes[1])
        ctrl = unicode_to_string(codes[2])
        alt = unicode_to_string(codes[3])
        # Blank out modifiers that don't change the plain key
        if ctrl == plain:
            ctrl = ""
        if alt == plain:
            alt = ""
        self.codes.append((plain, shift, ctrl, alt))
def filter_and_sort_arch_mirrorlist(self):
    """ Rank Arch mirrors by speed and rewrite the Arch mirrorlist.

    Appends each ranked mirror url to ``self.arch_mirrorlist_ranked`` and
    writes the sorted "Server = ..." lines (with literal $repo/$arch
    placeholders, expanded later by pacman) to ``self.arch_mirrorlist``.
    """
    output = '# Arch Linux mirrorlist generated by Cnchi #\n'
    mlist = self.get_mirror_stats()
    mirrors = self.sort_mirrors_by_speed(mirrors=mlist)

    for mirror in mirrors:
        self.arch_mirrorlist_ranked.append(mirror['url'])
        output += "Server = {0}{1}/os/{2}\n".format(
            mirror['url'],
            '$repo',
            '$arch'
        )

    # Write modified Arch mirrorlist (needs elevated privileges)
    with misc.raised_privileges():
        with open(self.arch_mirrorlist, 'w') as arch_mirrors:
            arch_mirrors.write(output)
    self.sync()
def ensured_executable(cmd):
    """ Ensures file is executable before attempting to execute it.

    Args:
        cmd (list): The command to check.

    Returns:
        True if successful, False otherwise.
    """
    command = list(cmd)
    needs_chmod = (
        command
        and not shutil.which(command[0])
        and os.path.exists(command[0]))
    if needs_chmod:
        try:
            os.chmod(command[0], 0o777)
        except Exception:
            # Plain chmod was denied; retry with elevated privileges.
            with raised_privileges() as __:
                os.chmod(command[0], 0o777)
    return shutil.which(command[0]) is not None
def update_mirrorlist(self):
    """ Make sure we have the latest antergos-mirrorlist files """
    with misc.raised_privileges():
        try:
            cmd = [
                'pacman', '-Syy', '--noconfirm', '--noprogressbar',
                '--quiet', 'antergos-mirrorlist']
            with open(os.devnull, 'w') as fnull:
                # BUG FIX: subprocess.call() never raises
                # CalledProcessError, which left the handler below
                # unreachable; check_call() surfaces a pacman failure so
                # it gets logged and the stale .pacnew copy is skipped.
                subprocess.check_call(cmd, stdout=fnull, stderr=subprocess.STDOUT)
            # Use the new downloaded mirrorlist (.pacnew) files (if any)
            pacnew_path = self.antergos_mirrorlist + ".pacnew"
            if os.path.exists(pacnew_path):
                shutil.copy(pacnew_path, self.antergos_mirrorlist)
        except subprocess.CalledProcessError as why:
            logging.debug('Cannot update antergos-mirrorlist package: %s', why)
        except OSError as why:
            logging.debug('Error copying new mirrorlist files: %s', why)
    self.sync()
def update_cnchi():
    """ Runs updater function to update cnchi to the latest version if necessary.

    When an update is installed, re-executes Cnchi (with elevated
    privileges and --disable-update appended) so the new version takes
    over; this function then never returns.
    """
    upd = updater.Updater(force_update=cmd_line.update,
                          local_cnchi_version=info.CNCHI_VERSION)

    if upd.update():
        logging.info("Program updated! Restarting...")
        misc.remove_temp_files()
        if cmd_line.update:
            # Remove -u and --update options from new call
            new_argv = []
            for argv in sys.argv:
                if argv != "-u" and argv != "--update":
                    new_argv.append(argv)
        else:
            new_argv = sys.argv
        # Do not try to update again now
        new_argv.append("--disable-update")
        # Run another instance of Cnchi (which will be the new version)
        # NOTE: os.execl replaces the current process; sys.exit(0) below
        # only runs if exec itself fails.
        with misc.raised_privileges() as __:
            os.execl(sys.executable, *([sys.executable] + new_argv))
        sys.exit(0)
def start(self):
    """ Run installation.

    Orchestrates the whole install: prepare/mount DEST_DIR, clean up
    leftovers from any previous attempt, prepare pacman, run hardware
    pre-install jobs, then download, install and configure packages.
    Returns True on success.
    """
    # From this point, on a warning situation, Cnchi should try to continue,
    # so we need to catch the exception here. If we don't catch the exception
    # here, it will be caught in run() and managed as a fatal error.
    # On the other hand, if we want to clarify the exception message we can
    # catch it here and then raise an InstallError exception.

    if not os.path.exists(DEST_DIR):
        with misc.raised_privileges():
            os.makedirs(DEST_DIR, mode=0o755, exist_ok=True)

    msg = _("Installing using the '{0}' method").format(self.method)
    self.queue_event("info", msg)

    # Mount needed partitions (in automatic it's already done)
    if self.method in ["alongside", "advanced", "zfs"]:
        self.mount_partitions()

    # Nasty workaround:
    # If pacman was stopped and /var is in another partition than root
    # (so as to be able to resume install), database lock file will still
    # be in place. We must delete it or this new installation will fail
    db_lock = os.path.join(DEST_DIR, "var/lib/pacman/db.lck")
    if os.path.exists(db_lock):
        with misc.raised_privileges():
            os.remove(db_lock)
        logging.debug("%s deleted", db_lock)

    # Create some needed folders
    folders = [
        os.path.join(DEST_DIR, "var/lib/pacman"),
        os.path.join(DEST_DIR, "etc/pacman.d/gnupg"),
        os.path.join(DEST_DIR, "var/log"),
    ]
    for folder in folders:
        os.makedirs(folder, mode=0o755, exist_ok=True)

    # If kernel images exists in /boot they are most likely from a failed
    # install attempt and need to be removed otherwise pyalpm will raise a
    # fatal exception later on.
    # NOTE(review): these paths hardcode "/install" rather than using
    # DEST_DIR like the rest of this method — presumably DEST_DIR is
    # "/install"; confirm before changing either.
    kernel_imgs = (
        "/install/boot/vmlinuz-linux",
        "/install/boot/vmlinuz-linux-lts",
        "/install/boot/initramfs-linux.img",
        "/install/boot/initramfs-linux-fallback.img",
        "/install/boot/initramfs-linux-lts.img",
        "/install/boot/initramfs-linux-lts-fallback.img",
    )
    for img in kernel_imgs:
        if os.path.exists(img):
            os.remove(img)

    logging.debug("Preparing pacman...")
    self.prepare_pacman()
    logging.debug("Pacman ready")

    # Run pre-install scripts (only catalyst does something here atm)
    # Note: Catalyst is disabled in catalyst.py
    try:
        logging.debug("Running hardware drivers pre-install jobs...")
        proprietary = self.settings.get("feature_graphic_drivers")
        self.hardware_install = hardware.HardwareInstall(
            use_proprietary_graphic_drivers=proprietary)
        self.hardware_install.pre_install(DEST_DIR)
    except Exception as ex:
        # Hardware failures are logged but never abort the install
        template = "Error in hardware module. An exception of type {0} occured. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        logging.error(message)

    logging.debug("Downloading packages...")
    self.download_packages()

    # This mounts (binds) /dev and others to /DEST_DIR/dev and others
    special_dirs.mount(DEST_DIR)

    logging.debug("Installing packages...")
    self.install_packages()

    logging.debug("Configuring system...")
    self.configure_system()

    # This unmounts (unbinds) /dev and others to /DEST_DIR/dev and others
    special_dirs.umount(DEST_DIR)

    # Finally, try to unmount DEST_DIR
    auto_partition.unmount_all_in_directory(DEST_DIR)

    self.running = False

    # Installation finished successfully
    self.queue_event("finished", _("Installation finished"))
    self.error = False
    return True