def reset_dir(self, pkgdirname=None, all=False, rmpkg=True):
    '''Reset a package dir (or the whole repo) to a clean git state.

    Args:
        pkgdirname: package subdir under REPO_ROOT to reset (ignored when
            ``all`` is set). Falsy value -> no-op, returns False.
        all: reset the entire REPO_ROOT instead of a single package dir.
        rmpkg: also delete built package / signature files in the dir.
    Returns:
        True when a reset was performed, False when nothing could be done.
    '''
    if all:
        logger.info('resetting %s', str(REPO_ROOT))
        bash(GIT_RESET_SUBDIR, cwd=REPO_ROOT)
        return True
    if not pkgdirname:
        return False
    cwd = REPO_ROOT / pkgdirname
    if not cwd.exists():
        return False
    logger.info('resetting %s', str(cwd))
    try:
        bash(GIT_RESET_SUBDIR, cwd=cwd)
    except Exception:
        # best-effort: keep going and still clean build leftovers below
        logger.error(f'Unable to reset dir {cwd}')
        print_exc_plus()
    # snapshot the entries first -- we delete while walking
    for fpath in list(cwd.iterdir()):
        if fpath.is_dir() and fpath.name in ('pkg', 'src'):
            if fpath.name == 'pkg':
                # 'pkg' can be left non-writable by the build -- open it up
                # so rmtree can remove it (assumes subdirs are removable;
                # TODO confirm against makepkg behavior)
                fpath.chmod(0o0755)
            rmtree(fpath)
        elif rmpkg and fpath.is_file() and \
                fpath.name.endswith((PKG_SUFFIX, PKG_SIG_SUFFIX)):
            fpath.unlink()
    return True
def done(self, fnames, overwrite=False):
    '''Finalize an announced upload batch: GPG-verify, then update the repo.

    Returns None on success, otherwise an error string.
    Always re-initializes self (resetting the push session state) via the
    ``finally`` clause once the file list has been accepted.
    '''
    # Reject anything that is not a package or a package signature.
    if [
            f for f in fnames
            if not (f.endswith(PKG_SUFFIX) or f.endswith(PKG_SIG_SUFFIX))
    ]:
        return "files to upload are garbage"
    # Strip .sig entries so we compare/iterate package files only.
    filter_sig = lambda fnames: [
        fname for fname in fnames if not fname.endswith(PKG_SIG_SUFFIX)
    ]
    # The completed file list must match what push_start announced
    # (self.fnames), ignoring ordering and signature files.
    if sorted(filter_sig(fnames)) == sorted(filter_sig(self.fnames)):
        try:
            update_path = Path('updates')
            for pkgfname in filter_sig(fnames):
                pkg_found = False
                sig_found = False
                # Locate both the package and its detached signature
                # in the updates directory.
                for fpath in update_path.iterdir():
                    if fpath.is_dir():
                        continue
                    if fpath.name == pkgfname:
                        pkg_found = fpath
                    elif fpath.name == f'{pkgfname}.sig':
                        sig_found = fpath
                if pkg_found and sig_found:
                    try:
                        bash(f'{GPG_VERIFY_CMD} {sig_found} {pkg_found}')
                    except CalledProcessError:
                        ret = f'{pkg_found} GPG verify error'
                        logger.error(ret)
                        print_exc_plus()
                        return ret
                    else:
                        # gpg verify success
                        continue
                else:
                    return f'file missing: pkg {pkg_found} sig {sig_found}'
                # unreachable: every branch above continues, returns or raises
                raise RuntimeError("unexpected error")
            else:
                # gpg verified for all
                try:
                    if update(overwrite=overwrite):
                        return None
                    else:
                        raise RuntimeError('update return false')
                except Exception:
                    print_exc_plus()
                    # NOTE(review): pkg_found is the last package of the
                    # loop above (undefined if fnames is empty) -- verify
                    return f'{pkg_found} update error'
                # unreachable: the try block above always returns or raises
                raise RuntimeError("unexpected error")
        finally:
            # Reset push session state no matter how we exit.
            self.__init__()
    else:
        return "Wrong file"
def tick(self):
    '''Scheduler heartbeat: check for updates, create new jobs and run them.

    Returns the suggested sleep time in seconds: 60 while idling between
    update checks, 0 after doing work, or None when no job was available.
    '''
    if not self.__buildjobs:
        # No queued jobs: poll for updates (rate-limited).
        if time() - self.last_updatecheck <= UPDATE_INTERVAL * 60:
            if not self.idle:
                logger.info('Buildbot is idling for package updates.')
                self.idle = True
            return 60
        self.last_updatecheck = time()
        self.idle = False
        # git pull repo; a failing pull is logged but not fatal
        try:
            bash(GIT_PULL, cwd=REPO_ROOT)
        except Exception:
            print_exc_plus()
        self.pkgconfigs = load_all_yaml()
        # One job per (package, arch); multiarch when 2+ arches build.
        for (pkgconfig, ver, buildarchs) in updmgr.check_update():
            march = len(buildarchs) >= 2
            for arch in buildarchs:
                self._new_buildjob(Job(arch, pkgconfig, ver, multiarch=march))
        return 0
    else:
        # This part does the job
        self.idle = False
        job = self.__get_job()
        if not job:
            # fix: was logging.error -- use the module logger like the
            # rest of this file
            logger.error('No job got')
            return
        if job.multiarch:
            # multiarch builds always start from a fully cleaned dir
            self.__clean(job, remove_pkg=True)
            self.__makepkg(job)
            self.__sign(job)
            if self.__upload(job):
                self.__clean(job, remove_pkg=True)
        else:
            self.__makepkg(job)
            self.__sign(job)
            if self.__upload(job):
                if job.pkgconfig.cleanbuild:
                    self.__clean(job, remove_pkg=True)
                else:
                    # keep sources for incremental rebuilds
                    self.__clean(job, rm_src=False, remove_pkg=True)
        self.__finish_job(job.pkgconfig.dirname)
        return 0
def __main():
    '''Serve the master RPC endpoint forever.

    Accepts one connection at a time, expects a [funcname, args, kwargs]
    list, dispatches it through run() and sends the result back.
    Malformed messages are dropped without a reply; any per-connection
    failure is logged and the listener is re-created.
    '''
    while True:
        try:
            with Listener(MASTER_BIND_ADDRESS,
                          authkey=MASTER_BIND_PASSWD) as listener:
                with listener.accept() as conn:
                    logger.debug('connection accepted from %s',
                                 listener.last_accepted)
                    myrecv = conn.recv()
                    # Only handle well-formed [funcname, args, kwargs] triples.
                    if isinstance(myrecv, list) and len(myrecv) == 3:
                        (funcname, args, kwargs) = myrecv
                        funcname = str(funcname)
                        conn.send(run(funcname, args=args, kwargs=kwargs))
        except Exception:
            # keep the server loop alive on any failure
            print_exc_plus()
def run(funcname, args=list(), kwargs=dict(), retries=0,
        server=(REPOD_BIND_ADDRESS, REPOD_BIND_PASSWD)):
    '''RPC client: call ``funcname(*args, **kwargs)`` on ``server``.

    Retries a refused connection once a minute while ``retries`` <= 10.
    Returns the remote result, False on connection/EOF failure, or None
    on an unexpected local error.

    Fix: the old implementation retried via a recursive call that did NOT
    forward ``server``, so retries silently fell back to the default
    (REPOD) address. The iterative loop below keeps the caller's server.
    '''
    (addr, authkey) = server
    while True:
        try:
            logger.info('client: %s %s %s', funcname, args, kwargs)
            with Client(addr, authkey=authkey) as conn:
                conn.send([funcname, args, kwargs])
                return conn.recv()
        except ConnectionRefusedError:
            if retries <= 10:
                logger.info("Server refused, retry after 60s")
                sleep(60)
                retries += 1
                continue
            logger.error("Server refused")
            return False
        except EOFError:
            logger.error('Internal server error')
            return False
        except Exception:
            # unexpected local failure: log it; None signals "no result"
            print_exc_plus()
            return None
def __makepkg(self, job):
    '''Build ``job`` with makepkg inside an nspawn shell.

    Runs the package's prebuild hooks, then makepkg (choice of command
    depends on multiarch/cleanbuild), then postbuild hooks; on build
    failure the 'failure' hooks run and the exception is re-raised.
    Returns the (short) makepkg output from mon_nspawn_shell.
    '''
    cwd = REPO_ROOT / job.pkgconfig.dirname

    def run_hook_scripts(stage):
        # Best-effort: run each shell-string hook with a 1h cap;
        # hook failures are logged but never abort the build.
        for scr in getattr(job.pkgconfig, stage, list()):
            if isinstance(scr, str):
                try:
                    mon_nspawn_shell(arch=job.arch, cwd=cwd, cmdline=scr,
                                     seconds=60 * 60)
                except Exception:
                    print_exc_plus()

    if job.multiarch:
        # assume a clean env, no source avail
        mkcmd = MAKEPKG_MAKE_CMD_MARCH
    else:
        mkcmd = MAKEPKG_MAKE_CMD_CLEAN if job.pkgconfig.cleanbuild \
            else MAKEPKG_MAKE_CMD
    logger.info('makepkg in %s %s', job.pkgconfig.dirname, job.arch)
    # run pre-makepkg-scripts
    logger.debug('running pre-build scripts')
    run_hook_scripts('prebuild')
    # actually makepkg
    try:
        ret = mon_nspawn_shell(arch=job.arch,
                               cwd=cwd,
                               cmdline=mkcmd,
                               logfile=cwd / MAKEPKG_LOGFILE,
                               short_return=True,
                               seconds=job.pkgconfig.timeout * 60)
    except Exception:
        logger.error(f'Job {job} failed. Running build-failure scripts')
        run_hook_scripts('failure')
        raise
    # run post-makepkg-scripts
    logger.debug('running post-build scripts')
    run_hook_scripts('postbuild')
    return ret
print('Error: Need package name') parser.print_help() parser.exit(status=1) server = (MASTER_BIND_ADDRESS, MASTER_BIND_PASSWD) for p in action[1:]: logger.info( run('rebuild_package', args=(p, ), kwargs={'clean': args.clean}, server=server)) elif action[0] == 'upload': if len(action) <= 1: print('Error: Need package name') parser.print_help() parser.exit(status=1) server = (MASTER_BIND_ADDRESS, MASTER_BIND_PASSWD) for p in action[1:]: logger.info( run('force_upload', args=(p, ), kwargs={'overwrite': args.overwrite}, server=server)) elif action[0] == 'log': logger.info('printing logs') print_log(debug=args.debug) else: parser.error("Please choose an action") except Exception: print_exc_plus() parser.exit(status=1)
def check_update(self, rebuild_package=None):
    '''Check every configured package for a new upstream version.

    Args:
        rebuild_package: when set, only that pkg dirname is checked and it
            is unconditionally treated as updated (forced rebuild).
    Returns:
        list of (pkgconfig, new_version, buildarchs) tuples to build.
    Side effects: updates self.__pkgvers / self.__pkgerrs and persists
    them via self._save(); toggles self.__rebuilding.
    '''
    updates = list()
    for pkg in jobsmgr.pkgconfigs:
        try:
            # A rebuild request arriving mid-scan aborts the plain scan.
            if self.__rebuilding and not rebuild_package:
                logger.info(f'Stop checking updates for rebuild.')
                break
            else:
                self.__rebuilding = bool(rebuild_package)
            if rebuild_package and \
                    rebuild_package != pkg.dirname:
                continue
            pkgdir = REPO_ROOT / pkg.dirname
            logger.info(
                f'{"[rebuild] " if rebuild_package else ""}checking update: {pkg.dirname}'
            )
            # Skip packages that already failed twice -- unless this is
            # an explicit rebuild, which overrides the back-off.
            if self.__pkgerrs.get(pkg.dirname, 0) >= 2:
                logger.warning(
                    f'package: {pkg.dirname} too many failures checking update'
                )
                if rebuild_package is None:
                    continue
            pkgbuild = pkgdir / 'PKGBUILD'
            archs = get_arch_from_pkgbuild(pkgbuild)
            # Map PKGBUILD arches to buildable arches; drop unsupported ones.
            buildarchs = [
                BUILD_ARCH_MAPPING.get(arch, None) for arch in archs
            ]
            buildarchs = [arch for arch in buildarchs if arch is not None]
            if not buildarchs:
                logger.warning(
                    f'No build arch for {pkg.dirname}, refuse to build.')
                continue
            # hopefully we only need to check one arch for update
            arch = 'x86_64' if 'x86_64' in buildarchs else buildarchs[
                0]  # prefer x86
            # run pre_update_scripts
            logger.debug('running pre-update scripts')
            for scr in getattr(pkg, 'update', list()):
                if type(scr) is str:
                    mon_nspawn_shell(arch, scr, cwd=pkgdir, seconds=60 * 60)
            # Refresh pkgver inside the container (up to 5h).
            mon_nspawn_shell(arch,
                             MAKEPKG_UPD_CMD,
                             cwd=pkgdir,
                             seconds=5 * 60 * 60,
                             logfile=pkgdir / PKG_UPDATE_LOGFILE,
                             short_return=True)
            if pkg.type in ('git', 'manual'):
                ver = self.__get_new_ver(pkg.dirname, arch)
                oldver = self.__pkgvers.get(pkg.dirname, None)
                has_update = False
                if rebuild_package:
                    # Forced rebuild counts as an update regardless of vercmp.
                    has_update = True
                if oldver:
                    res = vercmp(ver, oldver)
                    if res == 1:
                        has_update = True
                    elif res == -1:
                        logger.warning(
                            f'package: {pkg.dirname} downgrade attempted')
                    elif res == 0:
                        logger.info(
                            f'package: {pkg.dirname} is up to date')
                else:
                    # No version recorded yet -> treat as updated.
                    has_update = True
                # reset error counter
                self.__pkgerrs[pkg.dirname] = 0
                if has_update:
                    self.__pkgvers[pkg.dirname] = ver
                    updates.append((pkg, ver, buildarchs))
            else:
                logger.warning(f'unknown package type: {pkg.type}')
        except Exception:
            # Count the failure so repeat offenders get skipped (see
            # the back-off check above).
            self.__pkgerrs[pkg.dirname] = self.__pkgerrs.get(
                pkg.dirname, 0) + 1
            print_exc_plus()
    # Persist version/error bookkeeping and release the rebuild latch.
    self._save()
    self.__rebuilding = False
    return updates
def __upload(self, job, overwrite=False):
    '''Upload the built packages (and .sig files) for ``job``, then ask
    the remote to update the repository database.

    Protocol: push_start (announce names+sizes, get per-file timeouts) ->
    per-file upload with retry/push_add_time -> push_done. Raises
    RuntimeError when any phase exhausts its retries; returns True on a
    successful repo update.
    '''
    cwd = REPO_ROOT / job.pkgconfig.dirname
    f_to_upload = list()
    pkg_update_list = list()
    # Collect packages of exactly this job's version, each with its
    # signature (signature must already exist from __sign).
    for fpath in cwd.iterdir():
        if fpath.name.endswith(PKG_SUFFIX) and \
                get_pkg_details_from_name(fpath.name).ver == job.version:
            sigpath = fpath.parent / f'{fpath.name}.sig'
            assert sigpath.exists()
            f_to_upload.append(sigpath)
            f_to_upload.append(fpath)
            pkg_update_list.append(fpath)
    # Sizes in MB (decimal) for the remote to derive timeouts from.
    sizes = [f.stat().st_size / 1000 / 1000 for f in f_to_upload]
    pkg_update_list_human = " ".join([f.name for f in pkg_update_list])
    assert pkg_update_list
    # Phase 1: announce the push; remote replies with per-file timeouts,
    # or a busy indicator we wait out (1 min x 10).
    max_tries = 10
    for tries in range(max_tries):
        timeouts = rrun('push_start',
                        args=([f.name for f in f_to_upload], sizes))
        if type(timeouts) is list:
            break
        else:
            if tries + 1 < max_tries:
                logger.warning(
                    f'Remote is busy ({timeouts}), wait 1 min x10 [{tries+1}/10]'
                )
                sleep(60)
    else:
        raise RuntimeError('Remote is busy and cannot connect')
    assert len(f_to_upload) == len(timeouts)
    pkgs_timeouts = {
        f_to_upload[i]: timeouts[i]
        for i in range(len(sizes))
    }
    # Phase 2: upload each file, retrying with linear back-off and
    # telling the remote (push_add_time) to extend its deadline.
    for f in f_to_upload:
        max_tries = 5
        for tries in range(max_tries):
            timeout = pkgs_timeouts.get(f)
            try:
                logger.info(f'Uploading {f.name}, timeout in {timeout}s')
                mon_bash(UPLOAD_CMD.format(src=f), seconds=int(timeout))
            except Exception:
                time_to_sleep = (tries + 1) * 60
                logger.error(
                    f'We are getting problem uploading {f.name}, wait {time_to_sleep} secs'
                )
                patret = rrun('push_add_time',
                              args=(f.name, time_to_sleep + timeout))
                if not patret is None:
                    logger.error(
                        f'Unable to run push_add_time, reason: {patret}')
                print_exc_plus()
                if tries + 1 < max_tries:
                    sleep(time_to_sleep)
            else:
                # upload succeeded
                break
        else:
            # all 5 attempts failed: tell the remote and abort
            logger.error(
                f'Upload {f.name} failed, running push_fail and abort.')
            pfret = rrun('push_fail', args=(f.name, ))
            if not pfret is None:
                logger.error(f'Unable to run push_fail, reason: {pfret}')
            raise RuntimeError('Unable to upload some files')
    # Phase 3: request the repo database update; None from push_done
    # means success.
    logger.info(f'Requesting repo update for {pkg_update_list_human}')
    res = "unexpected"
    max_tries = 5
    for tries in range(max_tries):
        try:
            res = \
                rrun('push_done',
                     args=([f.name for f in f_to_upload], ),
                     kwargs={
                         'overwrite': overwrite,
                     })
        except Exception:
            time_to_sleep = (tries + 1) * 60
            logger.info(
                f'Error updating {pkg_update_list_human}, wait {time_to_sleep} secs'
            )
            print_exc_plus()
            if tries + 1 < max_tries:
                sleep(time_to_sleep)
        else:
            break
    else:
        ret = f'Update failed for {pkg_update_list_human}: max reties exceeded'
        logger.error(ret)
        raise RuntimeError(ret)
    if res is None:
        logger.info(f'Update success for {pkg_update_list_human}')
    else:
        ret = f'Update failed for {pkg_update_list_human}, reason: {res}'
        logger.error(ret)
        raise RuntimeError(ret)
    return res is None