def handle_request(req, filename=None):
    """Verify and dispatch one signed e-mail request.

    req      -- raw GPG-signed request body (string)
    filename -- optional source file name, used only in log messages

    Returns True when the request was dispatched, False when the body is
    empty, the signature is invalid/untrusted, or the signer is not in
    the ACL.
    """
    if req == '':
        log.alert('Empty body received. Filename: %s' % filename)
        return False
    keys = gpg.get_keys(req)
    (em, body) = gpg.verify_sig(req)
    if not em:
        log.alert("Invalid signature, missing/untrusted key. Keys in gpg batch: '%s'" % keys)
        return False
    user = acl.user_by_email(em)
    # identity test with 'is None' (was '== None'); immune to odd __eq__
    if user is None:
        # FIXME: security email here
        log.alert("'%s' not in acl. Keys in gpg batch: '%s'" % (em, keys))
        return False
    acl.set_current_user(user)
    status.push("request from %s" % user.login)
    r = request.parse_request(body)
    if r.kind == 'group':
        handle_group(r, user)
    elif r.kind == 'notification':
        handle_notification(r, user)
    else:
        # unknown request kind: log it and mail the sender back
        msg = "%s: don't know how to handle requests of this kind '%s'" \
            % (user.get_login(), r.kind)
        log.alert(msg)
        m = user.message_to()
        m.set_headers(subject="unknown request")
        m.write_line(msg)
        m.send()
    status.pop()
    return True
def handle_request(req, filename=None):
    """Verify and dispatch one signed e-mail request.

    req      -- raw GPG-signed request body (string)
    filename -- optional source file name, used only in log messages

    Returns True when the request was dispatched, False when the body is
    empty, the signature is invalid/untrusted, or the signer is not in
    the ACL.
    """
    if req == '':
        log.alert('Empty body received. Filename: %s' % filename)
        return False
    keys = gpg.get_keys(req)
    # verify_sig yields the signer's e-mail and the verified body
    (em, body) = gpg.verify_sig(req)
    if not em:
        log.alert(
            "Invalid signature, missing/untrusted key. Keys in gpg batch: '%s'" % keys)
        return False
    user = acl.user_by_email(em)
    if user == None:
        # FIXME: security email here
        log.alert("'%s' not in acl. Keys in gpg batch: '%s'" % (em, keys))
        return False
    acl.set_current_user(user)
    status.push("request from %s" % user.login)
    r = request.parse_request(body)
    if r.kind == 'group':
        handle_group(r, user)
    elif r.kind == 'notification':
        handle_notification(r, user)
    else:
        # unknown request kind: log it and mail the sender back
        msg = "%s: don't know how to handle requests of this kind '%s'" \
            % (user.get_login(), r.kind)
        log.alert(msg)
        m = user.message_to()
        m.set_headers(subject="unknown request")
        m.write_line(msg)
        m.send()
    status.pop()
    return True
def maybe_flush_queue(dir):
    """Flush the queue in *dir* unless its retry back-off is still active.

    The file "<dir>/retry-at" holds two integer lines: the epoch time of
    the last retry and the delay to wait before the next one.  If that
    delay has not yet elapsed, return without doing anything; otherwise
    remove the marker and attempt flush_queue(dir).  On failure a new
    marker is written with the next delay from retries_times (escalating,
    then wrapping to the start).
    """
    retry_delay = 0
    try:
        # was a bare 'except: pass' -- keep best-effort semantics but
        # only swallow the errors this block can legitimately raise
        # (missing/unreadable file, malformed integers)
        f = open(dir + "/retry-at")
        try:
            last_retry = int(string.strip(f.readline()))
            retry_delay = int(string.strip(f.readline()))
        finally:
            f.close()
        if last_retry + retry_delay > time.time():
            # back-off still in effect; try again later
            return
        os.unlink(dir + "/retry-at")
    except (IOError, OSError, ValueError):
        # no marker or a corrupt one: proceed with the flush attempt
        pass
    status.push("flushing %s" % dir)
    if flush_queue(dir):
        # flush failed (truthy result): schedule the next retry with an
        # escalated delay
        f = open(dir + "/retry-at", "w")
        try:
            if retry_delay in retries_times:
                idx = retries_times.index(retry_delay)
                if idx < len(retries_times) - 1:
                    idx += 1
            else:
                idx = 0
            f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
        finally:
            f.close()
    status.pop()
def handle_request_main(req, filename=None):
    """Entry point for e-mailed requests.

    Refreshes the ACL, loads the "src" builder configuration and then
    delegates the actual work to handle_request().  Returns whatever
    handle_request() returns (True on success, False otherwise).
    """
    acl.try_reload()
    init_conf("src")
    status.push("handling email request")
    result = handle_request(req, filename=filename)
    status.pop()
    return result
def handle_request_main(req, filename=None):
    """Entry point for e-mailed requests: refresh the ACL, load the
    "src" builder configuration and delegate to handle_request().
    Returns handle_request()'s result (True on success)."""
    acl.try_reload()
    init_conf("src")
    status.push("handling email request")
    ret = handle_request(req, filename=filename)
    status.pop()
    return ret
def build_srpm(r, b):
    """Build a source RPM for batch *b* of request *r* inside the chroot.

    On success b.src_rpm / b.src_rpm_file are set to the produced file
    and the package's working directory is cleaned up.  Returns the
    chroot exit status (0) on success, or a "FAIL*" string on error.
    Note: *res* holds an int exit code on the success path and a string
    on failure paths.
    """
    if len(b.spec) <= len('.spec'):
        # should not really get here
        util.append_to(b.logfile, "error: No .spec given but build src.rpm wanted")
        return "FAIL"
    status.push("building %s" % b.spec)
    # messagebus.notify(topic="build_srpm.start", spec=b.spec, flags=r.flags, batch=b, request=r)
    b.src_rpm = ""
    builder_opts = "-nu -nm --nodeps --http --define \'_pld_builder 1\'"
    if ("test-build" in r.flags):
        # test builds are not tagged
        tag_test = ""
    else:
        tag_test = " -Tp %s -tt" % (config.tag_prefixes[0], )
    cmd = ("cd rpm/packages; nice -n %s ./builder %s -bs %s -r %s %s %s %s %s 2>&1"
           % (config.nice, builder_opts, b.bconds_string(), b.branch,
              tag_test, b.kernel_string(), b.defines_string(), b.spec))
    util.append_to(b.logfile, "request from: %s" % r.requester)
    util.append_to(b.logfile, "started at: %s" % time.asctime())
    util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
    res = chroot.run(cmd, logfile=b.logfile)
    util.append_to(b.logfile, "exit status %d" % res)
    # collect_files presumably scrapes produced file names from the log
    # -- confirm against util module
    files = util.collect_files(b.logfile)
    if len(files) > 0:
        if len(files) > 1:
            util.append_to(b.logfile, "error: More than one file produced: %s" % files)
            res = "FAIL_TOOMANYFILES"
        # use the last produced file as the src.rpm
        last = files[len(files) - 1]
        b.src_rpm_file = last
        b.src_rpm = os.path.basename(last)
        r.chroot_files.extend(files)
    else:
        util.append_to(b.logfile, "error: No files produced.")
        res = "FAIL"
    if res == 0 and not "test-build" in r.flags:
        # tag the sources for every configured prefix
        for pref in config.tag_prefixes:
            util.append_to(b.logfile, "Tagging with prefix: %s" % pref)
            res = chroot.run("cd rpm/packages; ./builder -bs %s -r %s -Tp %s -Tv %s %s" % \
                    (b.bconds_string(), b.branch, pref, b.defines_string(), b.spec),
                    logfile=b.logfile)
    if res == 0:
        transfer_file(r, b)
    # drop the ".spec" suffix to get the package name
    packagename = b.spec[:-5]
    packagedir = "rpm/packages/%s" % packagename
    chroot.run("rpm/packages/builder -m %s" % \
            (b.spec,), logfile=b.logfile)
    chroot.run("rm -rf %s" % packagedir, logfile=b.logfile)
    status.pop()
    if res:
        # collapse any non-zero/any failure string into plain "FAIL"
        res = "FAIL"
    # messagebus.notify(topic="build_srpm.finish", spec=b.spec)
    return res
def init_conf(builder=None):
    """Load the builder configuration.

    builder -- configuration section name; falsy values fall back to
               "all".  Sets LC_ALL=C so tool output is parseable, and
    updates log.builder (first to the requested name, then to the name
    the configuration actually resolved to).
    """
    os.environ['LC_ALL'] = "C"
    status.push("reading builder config")
    log.builder = builder
    config.read(builder or "all")
    log.builder = config.builder
    status.pop()
def build_srpm(r, b):
    """Build a source RPM for batch *b* of request *r* inside the chroot.

    On success b.src_rpm / b.src_rpm_file are set to the produced file
    and the package's working directory is cleaned up.  Returns the
    chroot exit status (0) on success, or a "FAIL*" string on error.
    Note: *res* holds an int exit code on the success path and a string
    on failure paths.
    """
    if len(b.spec) <= len('.spec'):
        # should not really get here
        util.append_to(b.logfile, "error: No .spec given but build src.rpm wanted")
        return "FAIL"
    status.push("building %s" % b.spec)
    b.src_rpm = ""
    builder_opts = "-nu -nm --nodeps --http"
    if ("test-build" in r.flags):
        # test builds are not tagged
        tag_test = ""
    else:
        tag_test = " -Tp %s -tt" % (config.tag_prefixes[0],)
    cmd = ("cd rpm/packages; nice -n %s ./builder %s -bs %s -r %s %s %s %s %s 2>&1"
           % (config.nice, builder_opts, b.bconds_string(), b.branch,
              tag_test, b.kernel_string(), b.defines_string(), b.spec))
    util.append_to(b.logfile, "request from: %s" % r.requester)
    util.append_to(b.logfile, "started at: %s" % time.asctime())
    util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
    res = chroot.run(cmd, logfile=b.logfile)
    util.append_to(b.logfile, "exit status %d" % res)
    # collect_files presumably scrapes produced file names from the log
    # -- confirm against util module
    files = util.collect_files(b.logfile)
    if len(files) > 0:
        if len(files) > 1:
            util.append_to(b.logfile, "error: More than one file produced: %s" % files)
            res = "FAIL_TOOMANYFILES"
        # use the last produced file as the src.rpm
        last = files[len(files) - 1]
        b.src_rpm_file = last
        b.src_rpm = os.path.basename(last)
        r.chroot_files.extend(files)
    else:
        util.append_to(b.logfile, "error: No files produced.")
        res = "FAIL"
    if res == 0 and not "test-build" in r.flags:
        # tag the sources for every configured prefix
        for pref in config.tag_prefixes:
            util.append_to(b.logfile, "Tagging with prefix: %s" % pref)
            res = chroot.run("cd rpm/packages; ./builder -r %s -Tp %s -Tv %s" % \
                    (b.branch, pref, b.spec), logfile=b.logfile)
    if res == 0:
        transfer_file(r, b)
    # drop the ".spec" suffix to get the package name
    packagename = b.spec[:-5]
    packagedir = "rpm/packages/%s" % packagename
    chroot.run("rpm/packages/builder -m %s" % \
            (b.spec,), logfile=b.logfile)
    chroot.run("rm -rf %s" % packagedir, logfile=b.logfile)
    status.pop()
    if res:
        # collapse any non-zero/any failure string into plain "FAIL"
        res = "FAIL"
    return res
def reload(self):
    """(Re)load the package blacklist from path.blacklist_file.

    Records the file's mtime (so callers can detect staleness) and
    rebuilds self.blacklist as a set of package names, one per line.
    Lines starting with '#' are comments; blank lines are ignored.
    """
    self.blacklist_file_mtime = os.stat(path.blacklist_file)[stat.ST_MTIME]
    self.blacklist = set()
    status.push("reading package-blacklist")
    f = open(path.blacklist_file)
    try:
        for line in f:
            pkg = line.rstrip()
            # skip comments and blank lines (a prefix test replaces the
            # old re.match(r"^#.*") regex)
            if not pkg or pkg.startswith("#"):
                continue
            self.blacklist.add(pkg)
            # log the stripped name; the original logged the raw line,
            # which carried its trailing newline into the log
            log.notice("blacklist added: %s" % pkg)
    finally:
        # close the file even if a read fails
        f.close()
    status.pop()
def reload(self):
    """(Re)read acl.conf and rebuild the user lookup table.

    Records the config file's mtime (presumably consulted by
    try_reload() to detect changes -- confirm against caller) and
    resets the current user.  self.users maps each login *and* every
    GPG e-mail address to the corresponding User object.
    """
    self.acl_conf_mtime = os.stat(path.acl_conf)[stat.ST_MTIME]
    self.current_user = None
    status.push("reading acl.conf")
    p = ConfigParser.ConfigParser()
    p.readfp(open(path.acl_conf))
    self.users = {}
    for login in p.sections():
        # guard against a login colliding with an already-registered
        # e-mail key (Python 2 has_key API)
        if self.users.has_key(login):
            log.panic("acl: duplicate login: %s" % login)
            continue
        user = User(p, login)
        for e in user.gpg_emails:
            if self.users.has_key(e):
                # two users claim the same e-mail address
                log.panic("acl: user email colision %s <-> %s" % \
                    (self.users[e].login, login))
            else:
                self.users[e] = user
        self.users[login] = user
    status.pop()
def main():
    """Drain the src-builder queue, handling one request per iteration.

    Exits when another srpm build holds the lock, when the queue cannot
    be locked, or when the queue is empty.
    """
    init_conf("src")
    # only one srpm-building process at a time; 'is None' replaces the
    # '== None' identity comparison
    if lock("building-srpm", non_block=1) is None:
        return
    while True:
        status.push("srpm: processing queue")
        q = B_Queue(path.queue_file)
        if not q.lock(1):
            status.pop()
            return
        q.read()
        if q.requests == []:
            # nothing left to do
            q.unlock()
            status.pop()
            return
        r = pick_request(q)
        # persist the queue (pick_request presumably removed r from it
        # -- confirm against pick_request)
        q.write()
        q.unlock()
        status.pop()
        status.push("srpm: handling request from %s" % r.requester)
        handle_request(r)
        status.pop()
def main():
    """Drain the src-builder queue, handling one request per iteration.

    Exits when another srpm build holds the lock, when the queue cannot
    be locked, or when the queue is empty.
    """
    init_conf("src")
    # only one srpm-building process at a time
    if lock("building-srpm", non_block=1) == None:
        return
    while True:
        status.push("srpm: processing queue")
        q = B_Queue(path.queue_file)
        if not q.lock(1):
            status.pop()
            return
        q.read()
        if q.requests == []:
            # nothing left to do
            q.unlock()
            status.pop()
            return
        r = pick_request(q)
        # persist the queue (pick_request presumably removed r from it
        # -- confirm against pick_request)
        q.write()
        q.unlock()
        status.pop()
        status.push("srpm: handling request from %s" % r.requester)
        handle_request(r)
        status.pop()
def main():
    """Fetch new requests from the control URL and fan them out.

    Takes a process-wide lock (exiting with status 1 if another fetcher
    is running), then, when the upstream queue has news, forwards every
    request numbered above last_count to each configured binary builder
    and records the highest request number seen.
    """
    lck = lock.lock("request_fetcher", non_block=True)
    # 'is None' replaces the '== None' identity comparison
    if lck is None:
        sys.exit(1)
    init_conf()
    acl.try_reload()
    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            if r.no > max_no:
                max_no = r.no
            # only requests newer than the last processed one
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        # remember the highest request number for the next run
        with open(path.last_req_no_file, "w") as f:
            f.write("%d\n" % max_no)
    status.pop()
    lck.close()
def main():
    """Fetch new requests from the control URL and fan them out.

    Takes a process-wide lock (exiting with status 1 if another fetcher
    is running), then, when the upstream queue has news, forwards every
    request numbered above last_count to each configured binary builder
    and records the highest request number seen.
    """
    lck = lock.lock("request_fetcher", non_block=True)
    if lck == None:
        sys.exit(1)
    init_conf()
    acl.try_reload()
    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            if r.no > max_no:
                max_no = r.no
            # only requests newer than the last processed one
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        # remember the highest request number for the next run
        f = open(path.last_req_no_file, "w")
        f.write("%d\n" % max_no)
        f.close()
    status.pop()
    lck.close()
def build_rpm(r, b):
    """Build binary RPMs for batch *b* of request *r* inside the chroot.

    Installs the src.rpm, checks build-arch support, installs build
    requirements, runs rpmbuild -bb, copies products to the rpm cache
    (non-test builds), optionally upgrades from the batch, queues files
    for FTP upload and cleans the chroot.  Returns 0 on success or a
    "FAIL*"/"UNSUPP"/"SKIP_REQUESTED" string on error; note *res* holds
    an int exit code on success paths and a string on failure paths.
    """
    packagename = b.get_package_name()
    if not packagename:
        # should not really get here
        b.log_line("error: No .spec not given of malformed: '%s'" % b.spec)
        res = "FAIL_INTERNAL"
        return res
    status.push("building %s (%s)" % (b.spec, packagename))
    b.log_line("request from: %s" % r.requester)
    if check_skip_build(r, b):
        b.log_line("build skipped due to src builder request")
        res = "SKIP_REQUESTED"
        return res
    b.log_line("started at: %s" % time.asctime())
    fetch_src(r, b)
    b.log_line("installing srpm: %s" % b.src_rpm)
    res = chroot.run("""
        set -ex;
        install -d %(topdir)s/{BUILD,RPMS};
        rpm -Uhv --nodeps %(rpmdefs)s %(src_rpm)s;
        rm -f %(src_rpm)s;
    """ % {
        'topdir' : b._topdir,
        'rpmdefs' : b.rpmbuild_opts(),
        'src_rpm' : b.src_rpm
    }, logfile = b.logfile)
    b.files = []
    tmpdir = b.tmpdir()
    if res:
        b.log_line("error: installing src rpm failed")
        res = "FAIL_SRPM_INSTALL"
    else:
        prepare_env()
        chroot.run("set -x; install -m 700 -d %s" % tmpdir, logfile=b.logfile)
        b.default_target(config.arch)
        # check for build arch before filling BR
        cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
            "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' %(topdir)s/%(spec)s" % {
            'tmpdir': tmpdir,
            'nice' : config.nice,
            'topdir' : b._topdir,
            'rpmdefs' : b.rpmbuild_opts(),
            'spec': b.spec,
        }
        res = chroot.run(cmd, logfile = b.logfile)
        if res:
            res = "UNSUPP"
            b.log_line("error: build arch check (%s) failed" % cmd)
        if not res:
            # resolve build requirements unless the request opted out
            if ("no-install-br" not in r.flags) and not install.uninstall_self_conflict(b):
                res = "FAIL_DEPS_UNINSTALL"
            if ("no-install-br" not in r.flags) and not install.install_br(r, b):
                res = "FAIL_DEPS_INSTALL"
        if not res:
            # job count: CPUs+1 capped by config, overridable per request
            max_jobs = max(min(int(os.sysconf('SC_NPROCESSORS_ONLN') + 1), config.max_jobs), 1)
            if r.max_jobs > 0:
                max_jobs = max(min(config.max_jobs, r.max_jobs), 1)
            cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
                "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s %(topdir)s/%(spec)s" % {
                'r_id' : r.id,
                'tmpdir': tmpdir,
                'nice' : config.nice,
                'rpmdefs' : b.rpmbuild_opts(),
                'topdir' : b._topdir,
                'max_jobs' : max_jobs,
                'spec': b.spec,
            }
            b.log_line("building RPM using: %s" % cmd)
            begin_time = time.time()
            res = chroot.run(cmd, logfile = b.logfile)
            end_time = time.time()
            b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time)))
            if res:
                res = "FAIL"
            files = util.collect_files(b.logfile, basedir = b._topdir)
            if len(files) > 0:
                r.chroot_files.extend(files)
            else:
                b.log_line("error: No files produced.")
                # derive a specific failure code from the last log section
                last_section = util.find_last_section(b.logfile)
                if last_section == None:
                    res = "FAIL"
                else:
                    res = "FAIL_%s" % last_section.upper()
            b.files = files
    # cleanup tmp and build files
    chroot.run("""
        set -ex;
        chmod -R u+rwX %(topdir)s/BUILD;
        rm -rf %(topdir)s/{tmp,BUILD}
    """ % {
        'topdir' : b._topdir,
    }, logfile = b.logfile)

    # shorthand logger for the builder-info block below
    def ll(l):
        util.append_to(b.logfile, l)

    if b.files != []:
        rpm_cache_dir = config.rpm_cache_dir
        if "test-build" not in r.flags:
            # NOTE: copying to cache dir doesn't mean that build failed, so ignore result
            b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir)
            chroot.run(
                    "cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \
                        (string.join(b.files), rpm_cache_dir, rpm_cache_dir),
                    logfile = b.logfile, user = "******")
        else:
            ll("test-build: not copying to " + rpm_cache_dir)
        ll("Begin-PLD-Builder-Info")
        if "upgrade" in r.flags:
            b.upgraded = install.upgrade_from_batch(r, b)
        else:
            ll("not upgrading")
        ll("End-PLD-Builder-Info")

    # move each product out of the chroot and queue it for upload
    for f in b.files:
        local = r.tmp_dir + os.path.basename(f)
        chroot.cp(f, outfile = local, rm = True)
        ftp.add(local)

    # cleanup all remains from this build
    chroot.run("""
        set -ex;
        rm -rf %(topdir)s;
    """ % {
        'topdir' : b._topdir,
    }, logfile = b.logfile)

    # upload manifest: src.rpm plus every produced binary file
    def uploadinfo(b):
        c="file:SRPMS:%s\n" % b.src_rpm
        for f in b.files:
            c=c + "file:ARCH:%s\n" % os.path.basename(f)
        c=c + "END\n"
        return c

    if config.gen_upinfo and b.files != [] and 'test-build' not in r.flags:
        fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
        f = open(fname, "w")
        f.write(uploadinfo(b))
        f.close()
        ftp.add(fname, "uploadinfo")
    status.pop()
    return res
def main_for(builder):
    """Pick one request from *builder*'s queue, build it, then drop it.

    Normal-priority requests (priority >= 0) take a per-builder lock and
    one global job slot, and register themselves for the load balancer;
    high-priority requests (priority < 0) bypass all throttling.  The
    per-builder and slot locks are intentionally not released here --
    presumably held until process exit (confirm against lock module).
    """
    msg = ""
    init_conf(builder)
    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    if q.requests == []:
        q.unlock()
        return
    req = pick_request(q)
    q.unlock()
    # high priority tasks have priority < 0, normal tasks >= 0
    if req.priority >= 0:
        # allow only one build in given builder at once
        if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
            return
        # don't kill server
        check_load()
        # not more then job_slots builds at once
        locked = 0
        for slot in range(config.job_slots):
            if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
                locked = 1
                break
        if not locked:
            return
        # record fact that we got lock for this builder, load balancer
        # will use it for fair-queuing
        l = lock.lock("got-lock")
        f = open(path.got_lock_file, "a")
        f.write(config.builder + "\n")
        f.close()
        l.close()
    else:
        msg = "HIGH PRIORITY: "
    msg += "handling request %s (%d) for %s from %s, priority %s" \
        % (req.id, req.no, config.builder, req.requester, req.priority)
    log.notice(msg)
    status.push(msg)
    handle_request(req)
    status.pop()

    # predicate: keep every request except the one just handled
    def otherreqs(r):
        if r.no==req.no:
            return False
        else:
            return True

    # re-read the queue and remove the handled request, if still present
    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    previouslen=len(q.requests)
    q.requests=filter(otherreqs, q.requests)
    if len(q.requests)<previouslen:
        q.write()
    q.unlock()
def main_for(builder):
    """Pick one request from *builder*'s queue, build it, then drop it.

    Normal-priority requests (priority >= 0) take a per-builder lock and
    one global job slot, and register themselves for the load balancer;
    high-priority requests (priority < 0) bypass all throttling.  The
    per-builder and slot locks are intentionally not released here --
    presumably held until process exit (confirm against lock module).
    """
    msg = ""
    init_conf(builder)
    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    if q.requests == []:
        q.unlock()
        return
    req = pick_request(q)
    q.unlock()
    # high priority tasks have priority < 0, normal tasks >= 0
    if req.priority >= 0:
        # allow only one build in given builder at once
        if not lock.lock("building-rpm-for-%s" % config.builder, non_block=1):
            return
        # don't kill server
        check_load()
        # not more then job_slots builds at once
        locked = 0
        for slot in range(config.job_slots):
            if lock.lock("building-rpm-slot-%d" % slot, non_block=1):
                locked = 1
                break
        if not locked:
            return
        # record fact that we got lock for this builder, load balancer
        # will use it for fair-queuing
        l = lock.lock("got-lock")
        f = open(path.got_lock_file, "a")
        f.write(config.builder + "\n")
        f.close()
        l.close()
    else:
        msg = "HIGH PRIORITY: "
    msg += "handling request %s (%d) for %s from %s, priority %s" \
        % (req.id, req.no, config.builder, req.requester, req.priority)
    log.notice(msg)
    status.push(msg)
    handle_request(req)
    status.pop()

    # predicate: keep every request except the one just handled
    def otherreqs(r):
        if r.no == req.no:
            return False
        else:
            return True

    # re-read the queue and remove the handled request, if still present
    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    previouslen = len(q.requests)
    q.requests = filter(otherreqs, q.requests)
    if len(q.requests) < previouslen:
        q.write()
    q.unlock()
def build_rpm(r, b):
    """Build binary RPMs for batch *b* of request *r* inside the chroot.

    Installs the src.rpm, checks build-arch support, installs build
    requirements, runs rpmbuild -bb, copies products to the rpm cache
    (non-test builds), optionally upgrades from the batch, queues files
    for FTP upload and cleans the chroot.  Returns 0 on success or a
    "FAIL*"/"UNSUPP"/"SKIP_REQUESTED" string on error; note *res* holds
    an int exit code on success paths and a string on failure paths.
    """
    packagename = b.get_package_name()
    if not packagename:
        # should not really get here
        b.log_line("error: No .spec not given of malformed: '%s'" % b.spec)
        res = "FAIL_INTERNAL"
        return res
    status.push("building %s (%s)" % (b.spec, packagename))
    b.log_line("request from: %s" % r.requester)
    if check_skip_build(r, b):
        b.log_line("build skipped due to src builder request")
        res = "SKIP_REQUESTED"
        return res
    b.log_line("started at: %s" % time.asctime())
    fetch_src(r, b)
    b.log_line("installing srpm: %s" % b.src_rpm)
    res = chroot.run("""
        set -ex;
        install -d %(topdir)s/{BUILD,RPMS};
        rpm -Uhv --nodeps %(rpmdefs)s %(src_rpm)s;
        rm -f %(src_rpm)s;
    """ % {
        'topdir': b._topdir,
        'rpmdefs': b.rpmbuild_opts(),
        'src_rpm': b.src_rpm
    }, logfile=b.logfile)
    b.files = []
    tmpdir = b.tmpdir()
    if res:
        b.log_line("error: installing src rpm failed")
        res = "FAIL_SRPM_INSTALL"
    else:
        prepare_env()
        chroot.run("set -x; install -m 700 -d %s" % tmpdir, logfile=b.logfile)
        b.default_target(config.arch)
        # check for build arch before filling BR
        cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
            "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' %(topdir)s/%(spec)s" % {
            'tmpdir': tmpdir,
            'nice' : config.nice,
            'topdir' : b._topdir,
            'rpmdefs' : b.rpmbuild_opts(),
            'spec': b.spec,
        }
        res = chroot.run(cmd, logfile=b.logfile)
        if res:
            res = "UNSUPP"
            b.log_line("error: build arch check (%s) failed" % cmd)
        if not res:
            # resolve build requirements unless the request opted out
            if ("no-install-br" not in r.flags) and not install.uninstall_self_conflict(b):
                res = "FAIL_DEPS_UNINSTALL"
            if ("no-install-br" not in r.flags) and not install.install_br(r, b):
                res = "FAIL_DEPS_INSTALL"
        if not res:
            # job count: CPUs+1 capped by config, overridable per request
            max_jobs = max(
                min(int(os.sysconf('SC_NPROCESSORS_ONLN') + 1), config.max_jobs), 1)
            if r.max_jobs > 0:
                max_jobs = max(min(config.max_jobs, r.max_jobs), 1)
            cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
                "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s %(topdir)s/%(spec)s" % {
                'r_id' : r.id,
                'tmpdir': tmpdir,
                'nice' : config.nice,
                'rpmdefs' : b.rpmbuild_opts(),
                'topdir' : b._topdir,
                'max_jobs' : max_jobs,
                'spec': b.spec,
            }
            b.log_line("building RPM using: %s" % cmd)
            begin_time = time.time()
            res = chroot.run(cmd, logfile=b.logfile)
            end_time = time.time()
            b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time)))
            if res:
                res = "FAIL"
            files = util.collect_files(b.logfile, basedir=b._topdir)
            if len(files) > 0:
                r.chroot_files.extend(files)
            else:
                b.log_line("error: No files produced.")
                # derive a specific failure code from the last log section
                last_section = util.find_last_section(b.logfile)
                if last_section == None:
                    res = "FAIL"
                else:
                    res = "FAIL_%s" % last_section.upper()
            b.files = files
    # cleanup tmp and build files
    chroot.run("""
        set -ex;
        chmod -R u+rwX %(topdir)s/BUILD;
        rm -rf %(topdir)s/{tmp,BUILD}
    """ % {
        'topdir': b._topdir,
    }, logfile=b.logfile)

    # shorthand logger for the builder-info block below
    def ll(l):
        util.append_to(b.logfile, l)

    if b.files != []:
        rpm_cache_dir = config.rpm_cache_dir
        if "test-build" not in r.flags:
            # NOTE: copying to cache dir doesn't mean that build failed, so ignore result
            b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir)
            chroot.run(
                    "cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \
                        (string.join(b.files), rpm_cache_dir, rpm_cache_dir),
                    logfile = b.logfile, user = "******")
        else:
            ll("test-build: not copying to " + rpm_cache_dir)
        ll("Begin-PLD-Builder-Info")
        if "upgrade" in r.flags:
            b.upgraded = install.upgrade_from_batch(r, b)
        else:
            ll("not upgrading")
        ll("End-PLD-Builder-Info")

    # move each product out of the chroot and queue it for upload
    for f in b.files:
        local = r.tmp_dir + os.path.basename(f)
        chroot.cp(f, outfile=local, rm=True)
        ftp.add(local)

    # cleanup all remains from this build
    chroot.run("""
        set -ex;
        rm -rf %(topdir)s;
    """ % {
        'topdir': b._topdir,
    }, logfile=b.logfile)

    # upload manifest: src.rpm plus every produced binary file
    def uploadinfo(b):
        c = "file:SRPMS:%s\n" % b.src_rpm
        for f in b.files:
            c = c + "file:ARCH:%s\n" % os.path.basename(f)
        c = c + "END\n"
        return c

    if config.gen_upinfo and b.files != [] and 'test-build' not in r.flags:
        fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
        f = open(fname, "w")
        f.write(uploadinfo(b))
        f.close()
        ftp.add(fname, "uploadinfo")
    status.pop()
    return res