def syncDownDirectory(self, dirname, quiet=False):
    """Rsync one absolute directory tree from the build server down to the
    local machine, tunnelled through a temporary local stunnel instance.

    dirname: absolute path (same on both sides); symlinks are resolved first.
    quiet:   when True, suppress rsync progress output.
    """
    assert self.wSshPort is not None
    assert dirname.startswith("/")
    dirname = os.path.realpath(dirname)

    # wrap the remote rsync port in TLS via a throw-away local stunnel
    stunnelCfgFile, newPort, proc = self._createStunnelProcess(self.wRsyncPort)
    try:
        cmd = ""
        cmd += "rsync -a -z -hhh --delete %s " % ("--quiet" if quiet else "--info=progress2")
        for fn in self._ignoredPatternsWhenSyncDown():
            cmd += "-f '- %s' " % (fn)
        if True:
            # include the target tree itself ...
            buf = "-f '+ %s/***' " % (dirname)
            # ... and every ancestor directory (rsync filter rules must
            # include the full path; outermost ancestors are prepended so
            # they appear first), then exclude everything else below
            dirname = os.path.dirname(dirname)
            while dirname != "/":
                buf = "-f '+ %s' " % (dirname) + buf
                dirname = os.path.dirname(dirname)
            cmd += buf
        cmd += "-f '- /**' "
        cmd += "rsync://127.0.0.1:%d/main /" % (newPort)
        FmUtil.shellExec(cmd)
    finally:
        # always tear the tunnel down, even when rsync fails
        proc.terminate()
        proc.wait()
        os.unlink(stunnelCfgFile)
def _save(self):
    """Persist self.lineList to self.filename (one line per entry).

    Nothing is written when there is no content to save and no file exists yet.
    """
    if len(self.lineList) == 0 and not os.path.exists(self.filename):
        return
    FmUtil.ensureAncesterDir(self.filename)
    with open(self.filename, "w") as f:
        f.writelines(entry + "\n" for entry in self.lineList)
def doEnableSwap(self):
    """Enable swap for the detected storage layout and report it.

    Returns 0 on success, 1 when the run mode or layout is unsuitable.
    """
    mode = self.param.runMode
    if mode not in ("normal", "setup"):
        print("Operation is not supported in \"%s\" mode." % (mode), file=sys.stderr)
        return 1

    layout = strict_hdds.get_storage_layout()
    if layout is None:
        print("Invalid storage layout.", file=sys.stderr)
        return 1

    self.param.swapManager.enableSwap(layout)

    devSwap = layout.dev_swap
    if layout.name in ("bios-ext4", "efi-ext4"):
        # file-backed swap: report the file size
        sizeStr = FmUtil.formatSize(os.path.getsize(devSwap))
        print("Swap File: %s (size:%s)" % (devSwap, sizeStr))
    elif layout.name in ("efi-bcache-btrfs", "efi-bcachefs"):
        # partition-backed swap: report filesystem UUID and device size
        uuid = pyudev.Device.from_device_file(pyudev.Context(), devSwap).get("ID_FS_UUID")
        sizeStr = FmUtil.formatSize(FmUtil.getBlkDevSize(devSwap))
        print("Swap Partition: %s (UUID:%s, size:%s)" % (devSwap, uuid, sizeStr))
    else:
        assert False
    return 0
def updateDownloadCommand(self):
    """Point portage's FETCHCOMMAND / RESUMECOMMAND in make.conf at the
    robust_layer wget wrapper (RESUMECOMMAND additionally passes -c)."""
    prefix = "/usr/libexec/robust_layer/wget -q --show-progress"
    suffix = " -O \\\"\\${DISTDIR}/\\${FILE}\\\" \\\"\\${URI}\\\""
    FmUtil.setMakeConfVar(FmConst.portageCfgMakeConf, "FETCHCOMMAND", prefix + suffix)
    FmUtil.setMakeConfVar(FmConst.portageCfgMakeConf, "RESUMECOMMAND", prefix + " -c" + suffix)
def info(self):
    """Build a HwInfoPcBranded for ASUS machines.

    Returns None when the DMI system manufacturer is not a known ASUS string.
    """
    knownManufacturers = [
        "ASUSTeK COMPUTER INC.",
    ]
    self.manu = FmUtil.dmiDecodeWithCache("system-manufacturer")
    if self.manu not in knownManufacturers:
        return None
    self.model = FmUtil.dmiDecodeWithCache("system-product-name")
    self.sn = FmUtil.dmiDecodeWithCache("system-serial-number")

    ret = HwInfoPcBranded()
    ret.vendor = "ASUS"
    ret.model = self.model
    ret.hwSpec = self._hwSpec()
    ret.serialNumber = self.sn
    ret.arch = "amd64"
    ret.chassis_type = self._chassisType()
    ret.hwDict = _UtilHwDict.get(ret.hwSpec)
    ret.changeList = self._changeList(ret.hwSpec, ret.hwDict)
    ret.kernelCfgRules = self._kernelCfgRules()
    ret.useFlags = self._useFlags()
    ret.grubExtraWaitTime = 0
    return ret
def _fillUsrSrcLinux(self, bootDir, dstDir):
    """Populate dstDir with a minimal fake kernel source tree — a .config and
    a stub Makefile carrying version numbers — sufficient to satisfy
    linux-info.eclass.

    The config is copied from a config-* file in bootDir when one exists,
    otherwise taken from the running kernel's /proc/config.gz.
    """
    flist = glob.glob(os.path.join(bootDir, "config-*"))
    flist = [x for x in flist if not x.endswith(".rules")]
    if flist != []:
        fn = flist[0]        # example: /mnt/gentoo/boot/config-x86_64-4.11.9
        shutil.copyfile(fn, os.path.join(dstDir, ".config"))
        # "config-<arch>-<version>": dash-field 2 is the dotted version
        # (assumes exactly this filename shape — TODO confirm no
        # "config-<version>" files without an arch component occur)
        ver = os.path.basename(fn).split("-")[2]
        version = ver.split(".")[0]
        patchlevel = ver.split(".")[1]
        sublevel = ver.split(".")[2]
    else:
        # fall back to the running kernel's configuration and version
        with gzip.open("/proc/config.gz", "rb") as f_in:
            with open(os.path.join(dstDir, ".config"), "wb") as f_out:
                shutil.copyfileobj(f_in, f_out)
        version = FmUtil.shellCall("uname -r | /usr/bin/cut -d '.' -f 1")
        patchlevel = FmUtil.shellCall("uname -r | /usr/bin/cut -d '.' -f 2")
        sublevel = FmUtil.shellCall("uname -r | /usr/bin/cut -d '.' -f 3")
    with open(os.path.join(dstDir, "Makefile"), "w") as f:
        f.write("# Faked by fpemud-refsystem to fool linux-info.eclass\n")
        f.write("\n")
        f.write("VERSION = %s\n" % (version))
        f.write("PATCHLEVEL = %s\n" % (patchlevel))
        f.write("SUBLEVEL = %s\n" % (sublevel))
        f.write("EXTRAVERSION = \n")
def setUser(self, username, password):
    """Add a samba user or change its password (pdbedit -a handles both)."""
    if self.filename is None:
        return
    if not os.path.exists(self.filename):
        self._tdbFileCreate()
    # pdbedit -t reads the password twice from stdin (entry + confirmation)
    stdinData = ("%s\n" % (password)) * 2
    FmUtil.cmdCallWithInput("pdbedit", stdinData,
                            "-d", "0",
                            "-b", "tdbsam:%s" % (self.filename),
                            "-a", username, "-t")
def sshExec(self, *args, base64=False):
    """Execute a command on the remote host via ssh.

    args:   command name followed by its arguments.
    base64: when True, every argument after the command name is base64-encoded
            so arbitrary text survives the remote shell's word splitting.
    """
    assert self.wSshPort is not None

    # FIXME: ssh sucks that it must use a shell to execute remote command
    if base64:
        # The keyword parameter shadows the stdlib "base64" module, so import
        # it under an alias here; the previous code called base64.b64encode on
        # the boolean True and crashed with AttributeError.
        import base64 as b64mod
        args2 = list(args)
        for i in range(1, len(args2)):      # args2[0] is the command itself
            args2[i] = b64mod.b64encode(args2[i].encode("ascii")).decode("ascii")
    else:
        args2 = args

    # "-t" can get Ctrl+C controls remote process
    # XXXXX so that we forward signal to remote process, FIXME
    cmd = "ssh -t -e none -p %d -F %s %s %s" % (self.wSshPort, self.cfgFile, self.hostname, " ".join(args2))
    FmUtil.shellExec(cmd)
def hasUser(self, username):
    """Return True if username exists in the samba tdbsam database."""
    if self.filename is None:
        return False
    if not os.path.exists(self.filename):
        return False
    ret = FmUtil.cmdCall("pdbedit", "-d", "0", "-b", "tdbsam:%s" % (self.filename), "-L")
    # escape the username so regex metacharacters in it (e.g. ".") cannot
    # produce false matches against the pdbedit listing
    return re.search("^%s:[0-9]+:$" % (re.escape(username)), ret, re.M) is not None
def hwInfo(self):
    """Detect hardware info by trying each vendor probe in priority order.

    A CHASSIS value from machine-info, when present, overrides the probed
    chassis type. Returns None when no probe recognizes the machine.
    """
    ret = None
    for probeClass in (_PcHp, _PcAsus, _PcAliyun, _PcDiy):
        ret = probeClass().info()
        if ret is not None:
            break
    if ret is None:
        return ret

    r = FmUtil.getMachineInfoWithCache("CHASSIS")
    if r is not None:
        chassisMap = {
            "computer": ChassisType.COMPUTER,
            "laptop": ChassisType.LAPTOP,
            "tablet": ChassisType.TABLET,
            "handset": ChassisType.HANDSET,
            "headless": ChassisType.HEADLESS,
        }
        assert r in chassisMap
        ret.chassis_type = chassisMap[r]
    return ret
def syncUp(self):
    """Upload the white-listed local system trees to the build server.

    First switches the server into its "syncup" stage over the control
    connection, then rsyncs through a temporary TLS tunnel to the rsync port
    the server reported.
    """
    # enter stage
    self._sendRequestObj({
        "command": "stage-syncup",
    })
    resp = self._recvReponseObj()
    if "error" in resp:
        raise Exception(resp["error"])
    assert resp["return"]["stage"] == "syncup"

    # rsync
    stunnelCfgFile, newPort, proc = self._createStunnelProcess(resp["return"]["rsync-port"])
    try:
        cmd = ""
        cmd += "rsync -a -z -hhh --delete --delete-excluded --partial --info=progress2 "
        for fn in self._ignoredPatternsWhenSyncUp():
            cmd += "-f '- %s' " % (fn)
        cmd += "-f '+ /bin' "                # /bin may be a symlink or directory
        cmd += "-f '+ /bin/***' "
        cmd += "-f '+ /boot/***' "
        cmd += "-f '+ /etc/***' "
        cmd += "-f '+ /lib' "                # /lib may be a symlink or directory
        cmd += "-f '+ /lib/***' "
        cmd += "-f '+ /lib32' "              # /lib32 may be a symlink or directory
        cmd += "-f '+ /lib32/***' "
        cmd += "-f '+ /lib64' "              # /lib64 may be a symlink or directory
        cmd += "-f '+ /lib64/***' "
        cmd += "-f '+ /opt/***' "
        cmd += "-f '+ /sbin' "               # /sbin may be a symlink or directory
        cmd += "-f '+ /sbin/***' "
        cmd += "-f '+ /usr/***' "
        cmd += "-f '+ /var' "
        cmd += "-f '+ /var/cache' "
        cmd += "-f '+ /var/cache/edb/***' "
        cmd += "-f '+ /var/cache/portage/***' "
        cmd += "-f '+ /var/db' "
        cmd += "-f '+ /var/db/pkg/***' "
        cmd += "-f '+ /var/lib' "
        cmd += "-f '+ /var/lib/portage/***' "
        cmd += "-f '- /**' "                 # everything not included above is skipped
        cmd += "/ rsync://127.0.0.1:%d/main" % (newPort)
        FmUtil.shellExec(cmd)
    finally:
        # always tear the tunnel down, even when rsync fails
        proc.terminate()
        proc.wait()
        os.unlink(stunnelCfgFile)
def updateCcache(self):
    """Keep the "ccache" entry of make.conf FEATURES in sync with whether the
    local ccache service is enabled; make.conf is rewritten only on change."""
    value = FmUtil.getMakeConfVar(FmConst.portageCfgMakeConf, "FEATURES")
    features = value.split(" ") if value != "" else []

    wantCcache = CcacheLocalService().is_enabled()
    if wantCcache and "ccache" not in features:
        features.append("ccache")
        FmUtil.setMakeConfVar(FmConst.portageCfgMakeConf, "FEATURES", " ".join(features))
    elif not wantCcache and "ccache" in features:
        features.remove("ccache")
        FmUtil.setMakeConfVar(FmConst.portageCfgMakeConf, "FEATURES", " ".join(features))
def installPackage(self, pkgName, bTest):
    """Install one package, either locally or on a remote build server.

    pkgName: portage package name to emerge.
    bTest:   when True, passed through as "1" to the emerge helper script
             (presumably a oneshot/test merge — TODO confirm against
             op-emerge-package.py).
    """
    # modify dynamic config
    self.infoPrinter.printInfo(">> Preparing...")
    if True:
        dcm = DynCfgModifier()
        dcm.updateMirrors()
        dcm.updateDownloadCommand()
        dcm.updateParallelism(self.param.machineInfoGetter.hwInfo())
        dcm.updateCcache()
        print("")

    # get build server
    if BuildServerSelector.hasBuildServerCfgFile():
        self.infoPrinter.printInfo(">> Selecting build server...")
        buildServer = BuildServerSelector.selectBuildServer()
        print("")
    else:
        buildServer = None

    # sync up files to server
    if buildServer is not None:
        self.infoPrinter.printInfo(">> Synchronizing up...")
        buildServer.syncUp()
        buildServer.startWorking()
        print("")

    # emerge package
    self.infoPrinter.printInfo(">> Installing %s..." % (pkgName))
    cmd = "/usr/libexec/fpemud-os-sysman/op-emerge-package.py"
    tmpOp = "1" if bTest else "0"
    if buildServer is not None:
        try:
            buildServer.sshExec(cmd, pkgName, tmpOp)
        finally:
            # even on failure, pull back whatever the server changed
            self.infoPrinter.printInfo(">> Synchronizing down system files...")
            buildServer.syncDownSystem()
            print("")
    else:
        FmUtil.cmdExec(cmd, pkgName, tmpOp)

    # end remote build
    if buildServer is not None:
        buildServer.dispose()
def _updateUseFlag(self, pretendCmd2):
    """Run an emerge pretend command, parse its "USE changes are necessary"
    section, and merge the suggested flags into the 99-autouse file.

    Returns True when 99-autouse was rewritten; False when there was nothing
    to do, or a negated flag (e.g. "-*") was suggested, which we cannot
    process.  (The previous code fell off with a bare ``return`` (None) in
    the negated-flag case, breaking the boolean contract.)
    """
    fn = os.path.join(FmConst.portageCfgUseDir, "99-autouse")

    # collect (pkg-atom, use-flag-list) suggestions from the pretend output
    useLine = []
    rc, out = FmUtil.shellCallWithRetCode(pretendCmd2)
    bStart = False
    for line in out.split("\n"):
        if not bStart:
            if line == "The following USE changes are necessary to proceed:":
                bStart = True
            continue
        if line == "":
            break                           # blank line ends the section
        if line.startswith(" ") or line.startswith("#"):
            continue
        tlist = line.split(" ")
        if any(x.startswith("-") for x in tlist[1:]):
            return False                    # unable to process USE flag "-*"
        useLine.append((tlist[0], tlist[1:]))
    if useLine == []:
        return False

    # merge existing 99-autouse content, then the new suggestions,
    # keyed by plain package name (flag sets are unioned)
    useMap = dict()
    for pkgAtom, useList in FmUtil.portageParseCfgUseFile(pathlib.Path(fn).read_text()):
        pkgName = FmUtil.portageGetPkgNameFromPkgAtom(pkgAtom)
        useMap.setdefault(pkgName, set()).update(useList)
    for pkgAtom, useList in useLine:
        pkgName = FmUtil.portageGetPkgNameFromPkgAtom(pkgAtom)
        useMap.setdefault(pkgName, set()).update(useList)

    with open(fn, "w") as f:
        f.write(FmUtil.portageGenerateCfgUseFileByUseMap(useMap))
    return True
def disableSwap(self, layout):
    """Stop and remove the swap service unit; file-backed layouts also have
    their swap file deleted, partition-backed layouts keep the partition."""
    unit = FmUtil.path2SwapServiceName(layout.dev_swap)
    self._disableSwapService(layout.dev_swap, unit)
    self._removeSwapService(layout.dev_swap, unit)
    if layout.name in ("bios-ext4", "efi-ext4"):
        layout.remove_swap_file()
    elif layout.name in ("efi-bcache-btrfs", "efi-bcachefs"):
        pass        # swap partition is left in place
    else:
        assert False
def info(self):
    """Build a HwInfoPcBranded for HP machines.

    Returns None when the DMI system manufacturer is not a known HP string.
    """
    self.manu = FmUtil.dmiDecodeWithCache("system-manufacturer")
    if self.manu not in ("Hewlett-Packard", "HP"):
        return None
    self.model = FmUtil.dmiDecodeWithCache("system-product-name")
    self.sn = FmUtil.dmiDecodeWithCache("system-serial-number")

    ret = HwInfoPcBranded()
    ret.vendor = "HP"
    ret.model = self._name()
    ret.hwSpec = self._hwSpec()
    ret.serialNumber = self.sn
    ret.arch = "amd64"
    ret.chassis_type = self._chassisType()
    ret.hwDict = _UtilHwDict.get(ret.hwSpec)
    ret.changeList = self._changeList(ret.hwSpec, ret.hwDict)
    ret.kernelCfgRules = self._kernelCfgRules()
    ret.useFlags = self._useFlags()
    ret.grubExtraWaitTime = 0
    return ret
def enableSwap(self, layout):
    """Ensure a swap device exists for the layout, then create and enable its
    systemd swap unit.

    Raises Exception for partition-based layouts that lack a swap partition.
    """
    if layout.name in ("bios-ext4", "efi-ext4"):
        # file-backed swap can be created on demand
        if layout.dev_swap is None:
            layout.create_swap_file()
    elif layout.name in ("efi-bcache-btrfs", "efi-bcachefs"):
        # partition-backed swap must already exist
        if layout.dev_swap is None:
            raise Exception("no swap partition")
    else:
        assert False

    unit = FmUtil.path2SwapServiceName(layout.dev_swap)
    self._createSwapService(layout.dev_swap, unit)
    self._enableSwapService(layout.dev_swap, unit)
def _disableSwapService(self, path, serviceName):
    """Deactivate the swap (systemctl stop in normal mode, raw swapoff in
    setup mode) and disable its unit."""
    mode = self.param.runMode
    assert mode in ("normal", "setup")
    if mode == "normal":
        FmUtil.cmdCall("systemctl", "stop", serviceName)
    else:
        FmUtil.cmdCall("swapoff", path)
    FmUtil.cmdCall("systemctl", "disable", serviceName)
def _enableSwapService(self, path, serviceName):
    """Enable the swap unit, then activate it (systemctl start in normal mode,
    raw swapon in setup mode)."""
    FmUtil.cmdCall("systemctl", "enable", serviceName)
    mode = self.param.runMode
    if mode == "normal":
        FmUtil.cmdCall("systemctl", "start", serviceName)
    elif mode == "setup":
        FmUtil.cmdCall("swapon", path)
    else:
        assert False
def syncDownKernel(self):
    """Rsync kernel artifacts (/boot contents, kernel modules and firmware)
    from the build server down to the local machine through a temporary TLS
    tunnel."""
    assert self.wSshPort is not None
    stunnelCfgFile, newPort, proc = self._createStunnelProcess(self.wRsyncPort)
    try:
        cmd = ""
        cmd += "rsync -a -z -hhh --delete --info=progress2 "
        cmd += "-f '+ /boot' "
        cmd += "-f '+ /boot/config-*' "
        cmd += "-f '+ /boot/initramfs-*' "
        cmd += "-f '+ /boot/kernel-*' "
        cmd += "-f '+ /boot/System.map-*' "
        cmd += "-f '+ /boot/history/***' "
        cmd += "-f '+ /lib' "
        cmd += "-f '+ /lib/modules/***' "
        cmd += "-f '+ /lib/firmware/***' "
        cmd += "-f '- /**' "                 # everything not included above is skipped
        cmd += "rsync://127.0.0.1:%d/main /" % (newPort)
        FmUtil.shellExec(cmd)
    finally:
        # always tear the tunnel down, even when rsync fails
        proc.terminate()
        proc.wait()
        os.unlink(stunnelCfgFile)
def syncDownSystem(self):
    """Rsync the white-listed system trees from the build server down to the
    local machine through a temporary TLS tunnel (mirror of syncUp, minus
    /boot and the portage tree caches handled elsewhere)."""
    assert self.wSshPort is not None
    stunnelCfgFile, newPort, proc = self._createStunnelProcess(self.wRsyncPort)
    try:
        cmd = ""
        cmd += "rsync -a -z -hhh --delete --info=progress2 "
        for fn in self._ignoredPatternsWhenSyncDown():
            cmd += "-f '- %s' " % (fn)
        cmd += "-f '+ /bin' "                # /bin may be a symlink or directory
        cmd += "-f '+ /bin/***' "
        cmd += "-f '+ /etc/***' "
        cmd += "-f '+ /lib' "                # /lib may be a symlink or directory
        cmd += "-f '+ /lib/***' "
        cmd += "-f '+ /lib32' "              # /lib32 may be a symlink or directory
        cmd += "-f '+ /lib32/***' "
        cmd += "-f '+ /lib64' "              # /lib64 may be a symlink or directory
        cmd += "-f '+ /lib64/***' "
        cmd += "-f '+ /opt/***' "
        cmd += "-f '+ /sbin' "               # /sbin may be a symlink or directory
        cmd += "-f '+ /sbin/***' "
        cmd += "-f '+ /usr/***' "
        cmd += "-f '+ /var' "
        cmd += "-f '+ /var/cache' "
        cmd += "-f '+ /var/cache/edb/***' "
        cmd += "-f '+ /var/cache/portage/***' "
        cmd += "-f '+ /var/db' "
        cmd += "-f '+ /var/db/pkg/***' "
        cmd += "-f '+ /var/lib' "
        cmd += "-f '+ /var/lib/portage/***' "
        cmd += "-f '- /**' "                 # everything not included above is skipped
        cmd += "rsync://127.0.0.1:%d/main /" % (newPort)
        FmUtil.shellExec(cmd)
    finally:
        # always tear the tunnel down, even when rsync fails
        proc.terminate()
        proc.wait()
        os.unlink(stunnelCfgFile)
def info(self):
    """Build a HwInfoPcBranded for Alibaba Cloud ECS instances.

    Returns None when the DMI system manufacturer is not "Alibaba Cloud".
    """
    self.manu = FmUtil.dmiDecodeWithCache("system-manufacturer")
    if self.manu != "Alibaba Cloud":
        return None
    assert FmUtil.dmiDecodeWithCache("system-product-name") == "Alibaba Cloud ECS"
    self.sn = FmUtil.dmiDecodeWithCache("system-serial-number")
    # the concrete instance model is recorded in machine-info, not in DMI
    self.model = FmUtil.getMachineInfoWithCache("ALIYUN-HWNAME")
    assert self.model in self._MODELS

    ret = HwInfoPcBranded()
    ret.vendor = "ALIYUN"
    ret.model = self.model
    ret.hwSpec = self._hwSpec()
    ret.serialNumber = self.sn
    ret.arch = "amd64"
    ret.chassis_type = ChassisType.COMPUTER
    ret.hwDict = self._hwDict(ret.hwSpec)
    ret.changeList = self._changeList(ret.hwSpec, ret.hwDict)
    ret.kernelCfgRules = self._kernelCfgRules()
    ret.useFlags = self._useFlags()
    ret.grubExtraWaitTime = 20
    return ret
def connectAndInit(self):
    """Open the control connection to the build server and perform the
    initial handshake.

    A plain TCP socket is wrapped in TLSv1.2 (authenticating with our own
    certificate), then an "init" request describing this client is sent.
    On any failure, resources are released via dispose() and the exception
    is re-raised.
    """
    try:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.hostname, self.ctrlPort))

        # wrap the socket with TLSv1.2, presenting our own cert/key
        ctx = SSL.Context(SSL.TLSv1_2_METHOD)
        ctx.use_certificate_file(FmConst.myCertFile)
        ctx.use_privatekey_file(FmConst.myPrivKeyFile)
        self.sslSock = SSL.Connection(ctx, self.sock)
        self.sslSock.set_connect_state()

        self._sendRequestObj({
            "command": "init",
            "hostname": socket.gethostname(),
            "cpu-arch": FmUtil.getCpuArch(),
            "cpu-model": FmUtil.getCpuModel(),
            "plugin": "gentoo",
        })
        resp = self._recvReponseObj()
        if "error" in resp:
            raise Exception(resp["error"])
    except:
        # bare except is deliberate: clean up on ANY failure (including
        # KeyboardInterrupt), then re-raise unchanged
        self.dispose()
        raise
def _createStunnelProcess(self, port):
    """Start a client-mode stunnel forwarding a fresh local TCP port to
    <self.hostname>:<port> over TLS.

    Returns (cfgFilePath, localPort, process).  The caller owns all three:
    it must terminate the process and unlink the config file when done.
    """
    # mkstemp creates the file atomically (tempfile.mktemp, used before, is
    # deprecated and race-prone); we only need the path, so close the fd.
    fd, stunnelCfgFile = tempfile.mkstemp()
    os.close(fd)
    newPort = FmUtil.getFreeTcpPort()
    try:
        buf = ""
        buf += "cert = %s\n" % (FmConst.myCertFile)
        buf += "key = %s\n" % (FmConst.myPrivKeyFile)
        buf += "\n"
        buf += "client = yes\n"
        buf += "foreground = yes\n"
        buf += "\n"
        buf += "[rsync]\n"
        buf += "accept = %d\n" % (newPort)
        buf += "connect = %s:%d\n" % (self.hostname, port)
        with open(stunnelCfgFile, "w") as f:
            f.write(buf)
        proc = subprocess.Popen("stunnel %s 2>/dev/null" % (stunnelCfgFile), shell=True)
        # block until the local forwarding port accepts connections
        FmUtil.waitTcpService("0.0.0.0", newPort)
        return (stunnelCfgFile, newPort, proc)
    except:
        os.unlink(stunnelCfgFile)
        raise
def backup(self, devPath):
    """Back up system config/data and user files into a timestamped tarball
    on the first partition of devPath.

    FIXME: a very simple backup process
    """
    if re.fullmatch("/dev/[a-z]+", devPath) is None:
        raise Exception("invalid backup device %s" % (devPath))
    parti = devPath + "1"
    # report the partition (not the whole device) in these messages -- the
    # partition is what is actually being checked
    if not os.path.exists(parti):
        raise Exception("backup partition %s does not exist" % (parti))
    if FmUtil.isMountPoint(parti):
        raise Exception("%s is already mounted" % (parti))

    # build the file list: system config + system data + user files,
    # minus user cache and trash
    obj = strict_fsh.RootFs()
    wildcards = []
    wildcards = strict_fsh.merge_wildcards(wildcards, obj.get_wildcards(wildcards_flag=strict_fsh.WILDCARDS_SYSTEM_CFG))
    wildcards = strict_fsh.merge_wildcards(wildcards, obj.get_wildcards(wildcards_flag=strict_fsh.WILDCARDS_SYSTEM_DATA))
    wildcards = strict_fsh.merge_wildcards(wildcards, obj.get_wildcards(wildcards_flag=strict_fsh.WILDCARDS_USER))
    wildcards = strict_fsh.deduct_wildcards(wildcards, obj.get_wildcards(wildcards_flag=strict_fsh.WILDCARDS_USER_CACHE))
    wildcards = strict_fsh.deduct_wildcards(wildcards, obj.get_wildcards(wildcards_flag=strict_fsh.WILDCARDS_USER_TRASH))
    fileList = obj.wildcards_glob(wildcards)

    with TmpMount(parti) as mp:
        tarfilepath = os.path.join(mp.mountpoint, "backup-%s.tar.gz" % (datetime.now().strftime("%Y%m%d%H%M%S")))
        # "x:gz": the filename promises a .tar.gz; plain mode "x" would
        # silently create an uncompressed archive
        with tarfile.open(tarfilepath, mode="x:gz") as tf:
            for fullfn in fileList:
                print(fullfn)
                tf.add(fullfn, recursive=False)
def getAuxOsInfo(self):
    """Probe for other, non-linux operating systems via os-prober and return
    them as bbki.HostAuxOs objects (empty list when probing is disabled)."""
    if not FmConst.supportOsProber:
        return []

    ret = []
    for line in FmUtil.cmdCall("os-prober").split("\n"):
        fields = line.split(":")
        if len(fields) != 4:
            continue
        if fields[3] == "linux":
            continue
        if fields[1].endswith("(loader)"):
            # for Microsoft Windows quirks: os-prober reports the loader
            # partition; the OS lives on the next partition and needs
            # chain-loading (chain level 4)
            m = re.fullmatch("(.*?)([0-9]+)", fields[0])
            osDesc = fields[1].replace("(loader)", "").strip()
            osPart = "%s%d" % (m.group(1), int(m.group(2)) + 1)
            ret.append(bbki.HostAuxOs(osDesc, osPart, 4))
        else:
            ret.append(bbki.HostAuxOs(fields[1], fields[0], 1))
    return ret
#!/usr/bin/python3
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-

"""Install one package, optionally as a temporary ("oneshot") merge.

argv[1]: package name
argv[2]: "1" to merge with -1 (not recorded in @world), "0" otherwise
"""

import sys
sys.path.append('/usr/lib64/fpemud-os-sysman')
from fm_util import FmUtil
from helper_pkg_merger import PkgMerger

pkgName = sys.argv[1]
tmpOp = sys.argv[2]        # "0" or "1"

# NOTE: the previous check was "if not tmpOp", which is always False for the
# non-empty strings "0"/"1", so the already-installed short-circuit never
# fired; compare against "0" explicitly.
if tmpOp == "0" and FmUtil.portageIsPkgInstalled(pkgName):
    print("The specified package is already installed.")
else:
    PkgMerger().emergePkg("%s %s" % ("-1" if tmpOp == "1" else "", pkgName))
def _execAndSyncDownQuietly(self, buildServer, *args, directory=None):
    """Run the command locally, or remotely followed by a quiet sync-down of
    the given directory when a build server is in use."""
    if buildServer is None:
        FmUtil.cmdExec(*args)
        return
    buildServer.sshExec(*args)
    buildServer.syncDownDirectory(directory, quiet=True)
def _exec(self, buildServer, *args, base64=False):
    """Run the command locally, or on the build server when one is in use."""
    if buildServer is not None:
        buildServer.sshExec(*args, base64=base64)
    else:
        FmUtil.cmdExec(*args)
def update(self, bSync, bFetchAndBuild):
    """Full system update: optionally sync all repositories/overlays, then
    rebuild kernel, initramfs, bootloader and @world, either locally or
    through a remote build server.

    NOTE(review): bFetchAndBuild is never consulted below -- the build
    section runs unconditionally under "if True:" -- confirm intent.
    """
    if self.param.runMode in ["normal", "setup"]:
        layout = strict_hdds.get_storage_layout()
    else:
        layout = None
    bbkiObj = BbkiWrapper(layout)
    pkgwh = PkgWarehouse()
    overlayDb = CloudOverlayDb()

    # set system to unstable status
    if self.param.runMode in ["normal", "setup"]:
        if bbkiObj.isStable():
            with BootDirWriter(layout):
                bbkiObj.setStable(False)

    # modify dynamic config
    self.infoPrinter.printInfo(">> Preparing...")
    if True:
        dcm = DynCfgModifier()
        dcm.updateMirrors()
        dcm.updateDownloadCommand()
        dcm.updateParallelism(self.param.machineInfoGetter.hwInfo())
        dcm.updateCcache()
        print("")

    # get build server
    if BuildServerSelector.hasBuildServerCfgFile():
        self.infoPrinter.printInfo(">> Selecting build server...")
        buildServer = BuildServerSelector.selectBuildServer()
        print("")
    else:
        buildServer = None

    # sync up and start working
    if buildServer is not None:
        self.infoPrinter.printInfo(">> Synchronizing up...")
        buildServer.syncUp()
        buildServer.startWorking()
        print("")

    # do sync
    if bSync:
        # sync bbki repositories (in parallel, output serialized)
        with ParallelRunSequencialPrint() as prspObj:
            if buildServer is not None:
                startCoro = buildServer.asyncStartSshExec
                waitCoro = buildServer.asyncWaitSshExec
            else:
                startCoro = FmUtil.asyncStartCmdExec
                waitCoro = FmUtil.asyncWaitCmdExec
            for repo in bbkiObj.repositories:
                prspObj.add_task(startCoro, [self.opSync, self.param.runMode, "sync-bbki-repo", repo.name], waitCoro,
                                 # bind repo.name now: lambda default avoids late-binding capture
                                 pre_func=lambda x=repo.name: self.infoPrinter.printInfo(">> Synchronizing BBKI repository \"%s\"..." % (x)),
                                 post_func=lambda: print(""))
        # FIXME: there should be no sync down after realtime network filesystem support is done
        if buildServer is not None:
            buildServer.syncDownDirectory(FmConst.portageDataDir)

        # sync repository directories
        for repoName in pkgwh.repoman.getRepositoryList():
            repoDir = pkgwh.repoman.getRepoDir(repoName)
            self.infoPrinter.printInfo(">> Synchronizing repository \"%s\"..." % (repoName))
            self._execAndSyncDownQuietly(buildServer, self.opSync, self.param.runMode, "sync-repo", repoName, directory=repoDir)
            print("")

        # update cloud overlay db
        self.infoPrinter.printInfo(">> Synchronizing cloud overlay database...")
        overlayDb.update()
        print("")

        # sync overlay directories (in parallel, output serialized)
        with ParallelRunSequencialPrint() as prspObj:
            if buildServer is not None:
                startCoro = buildServer.asyncStartSshExec
                waitCoro = buildServer.asyncWaitSshExec
            else:
                startCoro = FmUtil.asyncStartCmdExec
                waitCoro = FmUtil.asyncWaitCmdExec
            for repo in pkgwh.layman.getOverlayList():
                if pkgwh.layman.getOverlayType(repo) == "static":
                    continue
                prspObj.add_task(startCoro, [self.opSync, self.param.runMode, "sync-overlay", repo], waitCoro,
                                 pre_func=lambda x=repo: self.infoPrinter.printInfo(">> Synchronizing overlay \"%s\"..." % (x)),
                                 post_func=lambda: print(""))
        # FIXME: there should be no sync down after realtime network filesystem support is done
        if buildServer is not None:
            buildServer.syncDownDirectory(FmConst.portageDataDir)

        # add pre-enabled overlays
        for repo, ourl in pkgwh.getPreEnableOverlays().items():
            if not pkgwh.layman.isOverlayExist(repo):
                self.infoPrinter.printInfo(">> Installing overlay \"%s\"..." % repo)
                vcsType = "git"
                if overlayDb.hasOverlay(repo):
                    # the overlay db knows better than the pre-enable config
                    vcsType, ourl = overlayDb.getOverlayVcsTypeAndUrl(repo)
                if ourl is None:
                    raise Exception("no URL for overlay %s" % repo)
                if buildServer is None:
                    FmUtil.cmdExec(self.opSync, self.param.runMode, "add-trusted-overlay", repo, vcsType, ourl)
                else:
                    # NOTE(review): unlike the "add-transient-overlay" call
                    # below, runMode is not passed here -- confirm intentional
                    buildServer.sshExec(self.opSync, "add-trusted-overlay", repo, vcsType, ourl)
                    buildServer.syncDownWildcardList([
                        os.path.join(pkgwh.layman.getOverlayFilesDir(repo), "***"),
                        pkgwh.layman.getOverlayDir(repo),
                        pkgwh.layman.getOverlayCfgReposFile(repo),
                    ], quiet=True)
                print("")

        # add pre-enabled overlays by pre-enabled package
        for repo, data in pkgwh.getPreEnablePackages().items():
            ourl = data[0]
            if not pkgwh.layman.isOverlayExist(repo):
                self.infoPrinter.printInfo(">> Installing overlay \"%s\"..." % repo)
                vcsType = "git"
                if overlayDb.hasOverlay(repo):
                    vcsType, ourl = overlayDb.getOverlayVcsTypeAndUrl(repo)
                if ourl is None:
                    raise Exception("no URL for overlay %s" % repo)
                if buildServer is None:
                    FmUtil.cmdExec(self.opSync, self.param.runMode, "add-transient-overlay", repo, vcsType, ourl)
                else:
                    buildServer.sshExec(self.opSync, self.param.runMode, "add-transient-overlay", repo, vcsType, ourl)
                    buildServer.syncDownWildcardList([
                        os.path.join(pkgwh.layman.getOverlayFilesDir(repo), "***"),
                        pkgwh.layman.getOverlayDir(repo),
                        pkgwh.layman.getOverlayCfgReposFile(repo),
                    ], quiet=True)
                print("")

        # add pre-enabled packages
        for repo, data in pkgwh.getPreEnablePackages().items():
            tlist = [x for x in data[1] if not pkgwh.layman.isOverlayPackageEnabled(repo, x)]
            if tlist != []:
                self.infoPrinter.printInfo(">> Enabling packages in overlay \"%s\"..." % repo)
                self._exec(buildServer, self.opSync, self.param.runMode, "enable-overlay-package", repo, *tlist)
                print("")
        if buildServer is not None:
            buildServer.syncDownDirectory(os.path.join(FmConst.portageDataDir, "overlay-*"), quiet=True)

        # refresh package related stuff
        self._execAndSyncDownQuietly(buildServer, self.opSync, self.param.runMode, "refresh-package-related-stuff", directory=FmConst.portageCfgDir)

        # eliminate "Performing Global Updates"
        self._execAndSyncDownQuietly(buildServer, self.opSync, self.param.runMode, "touch-portage-tree", directory=FmConst.portageDbDir)    # FIXME

    # do fetch and build
    if True:
        resultFile = os.path.join(self.param.tmpDir, "result.txt")
        kernelCfgRules = json.dumps(self.param.machineInfoGetter.hwInfo().kernelCfgRules)

        # install kernel, initramfs and bootloader
        with BootDirWriter(layout):
            self.infoPrinter.printInfo(">> Installing %s-%s..." % (bbkiObj.get_kernel_atom().fullname, bbkiObj.get_kernel_atom().ver))
            if True:
                self._exec(buildServer, self.opInstallKernel, self.param.runMode, kernelCfgRules, resultFile)
                # kernelBuilt, postfix = self._parseKernelBuildResult(self._readResultFile(buildServer, resultFile))
                print("")
            if buildServer is not None:
                self.infoPrinter.printInfo(">> Synchronizing down /boot, /lib/modules and /lib/firmware...")
                buildServer.syncDownKernel()
                print("")
            self.infoPrinter.printInfo(">> Creating initramfs...")
            if True:
                if self.param.runMode in ["normal", "setup"]:
                    bbkiObj.installInitramfs()
                else:
                    print("WARNING: Running in \"%s\" mode, do NOT create initramfs!!!" % (self.param.runMode))
                print("")
            self.infoPrinter.printInfo(">> Updating boot-loader...")
            if self.param.runMode in ["normal", "setup"]:
                bbkiObj.updateBootloader()
            else:
                print("WARNING: Running in \"%s\" mode, do NOT maniplate boot-loader!!!" % (self.param.runMode))
            print("")

            # synchronize boot partitions
            # NOTE(review): layout may be None outside normal/setup modes;
            # presumably those modes never reach here with these layout
            # names -- confirm
            if layout.name in ["efi-btrfs", "efi-bcache-btrfs", "efi-bcachefs"]:
                dstList = layout.get_pending_esp_list()
                if len(dstList) > 0:
                    with self.infoPrinter.printInfoAndIndent(">> Synchronizing boot partitions..."):
                        for dst in dstList:
                            self.infoPrinter.printInfo(" - %s to %s..." % (layout.get_esp(), dst))
                            layout.sync_esp(dst)
                    print("")

        # emerge @world
        self.infoPrinter.printInfo(">> Updating @world...")
        if buildServer is not None:
            try:
                buildServer.sshExec(self.opEmergeWorld)
            finally:
                # even on failure, pull back whatever the server changed
                self.infoPrinter.printInfo(">> Synchronizing down system files...")
                buildServer.syncDownSystem()
                print("")
        else:
            FmUtil.cmdExec(self.opEmergeWorld)

        # re-emerge all "-9999" packages
        self.infoPrinter.printInfo(">> Updating all \"-9999\" packages...")
        if buildServer is not None:
            try:
                buildServer.sshExec(self.opEmerge9999)
            finally:
                self.infoPrinter.printInfo(">> Synchronizing down system files...")
                buildServer.syncDownSystem()
                print("")
        else:
            FmUtil.cmdExec(self.opEmerge9999)

    # end remote build
    if buildServer is not None:
        buildServer.dispose()