def run(self):
    """Build a listing of the regular files in self._path with their SHA-1
    digests.

    On success the listing is stored in self.result:
      * if self._ret_list is true  -> list of (filename, digest) tuples
      * otherwise                  -> dict mapping filename -> digest
    Digests are raw 20-byte strings (hashlib ``digest()``).
    On failure the error is logged and self.result is left unset.
    """
    def _sha1_file(path):
        # Stream the file in 64 KiB chunks.  The previous code read
        # sha1_ctx.block_size (64 bytes) per iteration, which is the
        # hash's internal compression block, not a sensible I/O size.
        with open(path, 'rb') as f:
            sha1_ctx = hashlib.sha1()
            while True:
                buf = f.read(65536)
                if not buf:
                    return sha1_ctx.digest()
                sha1_ctx.update(buf)
    try:
        rlist = self._ret_list
        path = self._path
        if rlist:
            ret = list()
        else:
            ret = dict()
        for i in os.listdir(path):
            pf = os.path.join(path, i)
            # skip subdirectories and anything that is not a regular file
            if os.path.isfile(pf):
                checksum = _sha1_file(pf)
                if rlist:
                    ret.append((i, checksum))
                else:
                    ret[i] = checksum
        self.result = ret
    except Exception as e:
        log.error("failed to obtain file list of: {0}".format(e))
def thread_work():
    """wait for tasks and distribute them to judges"""
    global _task_queue, _refresh_interval
    # background thread that keeps poking the website's scheduled-job hook
    threading.Thread(target=web.thread_sched_work,
            name="web.thread_web_sched_work").start()
    while not control.test_termination_flag():
        # inner loop: drain every task the website currently has pending
        while not control.test_termination_flag():
            try:
                task = web.fetch_task()
            except web.Error as e:
                # communication failure is fatal: flag global shutdown
                log.error(
                    "ending program because of communication error with website"
                )
                control.set_termination_flag()
                return
            if task is None:
                break  # no more pending tasks; sleep before polling again
            task.lang_id = _get_lang_id(task.lang)
            _task_queue.put(task)
            log.info("fetched task #{0} from website".format(task.id))
        time.sleep(_refresh_interval)
def _parse(self):
    """Parse the problem configuration file under self._pcode and dispatch
    to the parser matching its declared version.

    Raises Error (after logging) on any parse or validation failure.
    """
    pcode = self._pcode
    version_parsers = {"1.0": self._parse_1d0}
    try:
        doc = ElementTree()
        doc.parse(os.path.join(pcode, _PROBCONF_FILE))
        conf_root = doc.getroot()
        if conf_root.tag != "orzoj-prob-conf":
            raise _Parse_error("tag 'orzoj-prob-conf' not found")
        if "version" not in conf_root.attrib:
            raise _Parse_error("version not found")
        version = conf_root.attrib["version"]
        if version not in version_parsers:
            raise _Parse_error("unknown version: {0!r}".format(version))
        version_parsers[version](conf_root)
    except Exception as e:
        msg = "[pcode: {0!r}] failed to parse problem configuration file: {1}".format(pcode, e)
        log.error(msg)
        raise Error(msg)
def fetch_task():
    """try to fetch a new task. return None if no new task available.
    this function does not raise exceptions

    data: action=fetch_task,
          prev=array("type"=><type of previous task>, <type specified arguments>)|None
    return: array("type"=>type, <type specified arguments>)
    type:
        "none" -- no new task
            args: none
        "src" -- new source file to be judged
            args: id, prob, lang, src, input, output (see structures.py)"""
    global _fetch_task_prev
    try:
        ret = _read({"action": "fetch_task", "prev": _fetch_task_prev})
        # remember the full reply; the website uses it to correlate the
        # next poll with the previously delivered task
        _fetch_task_prev = ret
        t = ret["type"]
        if t == "none":
            return  # no pending task -> implicit None
        if t == "src":
            # for source tasks only the id must be echoed back next time
            _fetch_task_prev = {'type': 'src', 'id': ret['id']}
            v = structures.task()
            # copy every declared task attribute straight from the reply;
            # assumes the reply carries a key for each field -- a missing
            # key raises and is handled below
            for i in v.__dict__:
                v.__dict__[i] = ret[i]
            v.id = int(v.id)  # website sends the id as a string
            return v
        raise _internal_error("unknown task type: {0!r}".format(t))
    except Exception as e:
        log.error("failed to fetch task: {0}".format(e))
        raise Error
def run(self):
    """Extract the tar archive held by self._fobj into self._dirpath.

    On failure the error is logged and self.error is set to True;
    on success self.error is left untouched.
    """
    try:
        tf = tarfile.open(mode="r", fileobj=self._fobj)
        try:
            tf.extractall(self._dirpath)
        finally:
            # bug fix: previously the TarFile was only closed on success,
            # leaking the handle when extractall raised
            tf.close()
    except Exception as e:
        log.error("failed to extract tar file: {0}".format(e))
        self.error = True
def recv(fpath, conn):
    """receive file and save it at @fpath, return the speed in kb/s
    OFTPError may be raised"""
    # small wrappers so the protocol steps below read linearly
    def _write_msg(m):
        msg.write_msg(conn, m)
    def _read_msg():
        return msg.read_msg(conn)
    def _check_msg(m):
        # expect exactly message @m next on the wire
        if m != _read_msg():
            log.warning("message check error.")
            raise OFTPError
    try:
        time_start = datetime.datetime.now()
        sha_ctx = hashlib.sha1()
        with open(fpath, "wb") as fptr:
            # handshake: BEGIN exchange, then version check
            _check_msg(msg.OFTP_BEGIN)
            _write_msg(msg.OFTP_BEGIN)
            _write_msg(_OFTP_VERSION)
            if conn.read_uint32() != _OFTP_VERSION:
                log.warning("version check error.")
                raise OFTPError
            fsize = conn.read_uint32()
            _write_msg(msg.OFTP_TRANS_BEGIN)
            # receive the body in _PACKET_SIZE chunks; the last chunk is
            # shrunk so exactly fsize bytes are read in total
            s = 0
            while s < fsize:
                psize = _PACKET_SIZE
                s += psize
                if s > fsize:
                    psize -= s - fsize
                buf = conn.read(psize)
                sha_ctx.update(buf)
                fptr.write(buf)
            # trailer: END exchange, then send our SHA-1 for verification
            _check_msg(msg.OFTP_END)
            _write_msg(msg.OFTP_END)
            conn.write(sha_ctx.digest())
            m = _read_msg()
            if m == msg.OFTP_CHECK_OK:
                # transfer speed in kb/s
                return fsize / 1024.0 / _td2seconds(datetime.datetime.now() - time_start)
            else:
                log.warning("SHA1 check failed while receiving file.")
                raise OFTPError
    except EnvironmentError as e:
        # local I/O failure: tell the peer before giving up
        log.error("error while receiving file [errno {0}] [filename {1!r}]: {2}".format(e.errno, e.filename, e.strerror))
        conn.write_uint32(msg.OFTP_SYSTEM_ERROR)
        raise OFTPError
    except snc.Error:
        log.warning("failed to transfer file because of network error.")
        raise OFTPError
def __init__(self, sock, is_server=0):
    """Establish an SSL session over @sock via the _snc extension.

    @is_server: truthy for a server-side handshake (normalized to 1
    before being handed to the extension).
    Raises Error (after logging) if the handshake fails.
    """
    self._snc = None
    try:
        flag = is_server
        if flag:
            flag = 1  # the extension expects the literal value 1
        self._snc = _snc.snc(sock._socket, flag, _timeout,
                             _cert_file, _key_file, _ca_file)
    except Exception as e:
        log.error("failed to establish SSL connection:\n{0!r}".format(e))
        raise Error
def get_query_list():
    """return a list containing the queries for judge info

    data: action=get_query_list
    return: array of query list"""
    try:
        reply = _read({"action": "get_query_list"})
        return phpserialize.dict_to_list(reply)
    except Exception as e:
        log.error("can not convert dict to list: {0}".format(e))
        raise Error
def run(self):
    """Pack the files named in self._flist (paths relative to
    self._dirpath) into a gzipped tar stream written to self._fobj.

    Symbolic links are followed (dereference=True) and each entry is
    stored under its bare relative name.  On failure the error is
    logged and self.error is set to True.
    """
    try:
        archive = tarfile.open(mode='w:gz', fileobj=self._fobj,
                               dereference=True)
        for name in self._flist:
            # archive under the relative name, not the absolute path
            archive.add(os.path.join(self._dirpath, name), name)
        archive.close()
    except Exception as e:
        log.error("failed to create tar file: {0}".format(e))
        self.error = True
def socket(host, port, timeout=0):
    """set @host to None if in server mode (accept() usable)
    @timeout is only usable in client mode (connection timeout)"""
    server_mode = host is None
    try:
        raw = _snc.socket(host, port, _use_ipv6, timeout)
        return _socket_real(raw, server_mode)
    except _snc.error_timeout:
        raise ErrorTimeout
    except Exception as e:
        log.error("socket error: {0!r}".format(e))
        raise Error
def write(self, data, timeout=0):
    """write all of @data"""
    # negative timeout means "block"; otherwise add the module-wide slack
    timeout = 0 if timeout < 0 else timeout + _timeout
    try:
        return self._snc.write(data, timeout)
    except Exception as e:
        log.error("failed to write:\n{0!r}".format(e))
        raise Error
def call(self):
    """Invoke the stored callback once, then clear it.

    web.Error marks the parent's error event silently; any other
    exception is logged first and then also marks the error event.
    A cleared (None) callback makes this a no-op.
    """
    if self._func is None:
        return
    try:
        self._func(*self._args)
    except web.Error:
        self._par._on_error.set()
    except Exception as e:
        log.error("error while communicating with orzoj-website: {0}".
                  format(e))
        self._par._on_error.set()
    self._func = None
def _clean_temp():
    """clean temporary directory"""
    global _dir_temp_abs
    try:
        for entry in os.listdir(_dir_temp_abs):
            full = _join_path(_dir_temp_abs, entry)
            # real directories are removed recursively; files and
            # symlinks (even symlinks to directories) are unlinked
            if os.path.isdir(full) and not os.path.islink(full):
                shutil.rmtree(full)
            else:
                os.remove(full)
    except Exception as e:
        log.error("failed to clean temporary directory [{0!r}]: {1}".format(
            _dir_temp_abs, e))
        raise Error
def run_as_compiler(self, fsrc, extra_args=None):
    """run the executor as compiler (retrieve stderr and stdout)

    return a tuple (success, info), where success is a boolean value
    indicating whether it's compiled successfully, while info is the
    executor output or a string indicating some system error (human
    readable); if successfully compiled, info is None

    @extra_args can be a list of string (will be evaluated using
    limiter.eval_arg_list)

    no exceptions are raised"""
    global _cmd_vars
    # work on a copy so the shared template is never mutated
    var_dict = dict(_cmd_vars)
    var_dict['SRC'] = fsrc
    try:
        args = limiter.eval_arg_list(self._args, var_dict)
        if extra_args:
            args.extend(limiter.eval_arg_list(extra_args, var_dict))
    except Exception as e:
        log.error("failed to parse executor configuration: {0}".format(e))
        return (False, "failed to compile: executor configuration error")
    try:
        l = self._limiter
        # SRC was only needed to build the compiler command line;
        # the limiter instead receives the full command as TARGET
        del var_dict['SRC']
        var_dict["TARGET"] = args
        l.run(var_dict, stdin=limiter.get_null_dev(False),
              stdout=limiter.SAVE_OUTPUT, stderr=limiter.SAVE_OUTPUT)
        if l.exe_status:
            if l.exe_status == structures.EXESTS_EXIT_NONZERO:
                # ordinary compile error: return the compiler's own output
                return (False, l.stdout + l.stderr)
            else:
                # abnormal termination (signal, limit, ...)
                return (False, "failed to compile: {0}: details: {1}".format(
                    structures.EXECUTION_STATUS_STR[l.exe_status],
                    l.exe_extra_info))
        return (True, None)
    except limiter.SysError as e:
        return (
            False,
            "failed to compile: limiter error: {0} [stderr: {1}]".format(
                e.msg, l.stderr))
    except Exception as e:
        return (False, "failed to compile: caught exception: {0}".format(e))
def read(self, len, timeout=0):
    """read exactly @len bytes

    if @timeout is negative, it will block until data available;
    else timeout is @timeout plus @_timeout
    """
    # NOTE: the parameter name `len` shadows the builtin but is part of
    # the public interface, so it is kept
    timeout = 0 if timeout < 0 else timeout + _timeout
    try:
        return self._snc.read(len, timeout)
    except Exception as e:
        log.error("failed to read [len={0}]:\n{1!r}".format(len, e))
        raise Error
def run(self):
    """Forward case results from self._queue to the judge connection
    until self._ncase results have been sent.

    While the queue is empty, keep-alive messages are written instead.
    On any failure the exception is logged and saved in self._error.
    """
    try:
        while self._ncase:
            try:
                res = self._queue.get(False)
            except Queue.Empty:
                # nothing ready yet -- tell the peer we are still alive
                msg.write_msg(self._conn, msg.TELL_ONLINE)
                time.sleep(msg.TELL_ONLINE_INTERVAL)
                continue
            msg.write_msg(self._conn, msg.REPORT_CASE)
            res.write(self._conn)
            self._ncase -= 1
    except Exception as e:
        log.error("failed to report case result: {0}".format(e))
        self._error = e
def _login():
    """
    1. read at most _DYNAMIC_PASSWD_MAXLEN bytes from
       orz.php?action=login1&version=_VERSION, _dynamic_passwd is the data
       read; _dynamic_passwd = "0" means version check error
    2. from orz.php?action=login2&checksum=
       _sha1sum(_sha1sum(_dynamic_passwd + _static_passwd)),
       and verify that it should be
       _sha1sum(_sha1sum(_dynamic_passwd) + _static_passwd)
    3. if it's the first time to login (i.e. relogin), send "refetch" to
       fetch all tasks with status "waiting on orzoj-server"
    """
    global _static_passwd, _passwd, _first_login
    try:
        try:
            # step 1: obtain the per-session challenge
            _dynamic_passwd = _read({
                "action": "login1",
                "version": _VERSION
            }, _DYNAMIC_PASSWD_MAXLEN)
            if _dynamic_passwd == '0':
                raise _internal_error("website version check error")
            # vpwd is what the website must echo back to prove it also
            # knows _static_passwd; _passwd becomes the session secret
            vpwd = _sha1sum(_sha1sum(_dynamic_passwd) + _static_passwd)
            _passwd = _sha1sum(_dynamic_passwd + _static_passwd)
            data = {"action": "login2", "checksum": _sha1sum(_passwd)}
            if _first_login:
                data["refetch"] = 1  # re-fetch tasks stuck on the server
            pwd_peer = _read(data, len(vpwd))
            if pwd_peer != vpwd:
                raise _internal_error(
                    "website verification error [peer returned: {0!r}]".format(
                        pwd_peer))
            if _first_login:
                _first_login = False
                remove_judge_all()
        except Error:
            raise _internal_error("failed to login to the website")
    except _internal_error as e:
        # login failure is fatal for the whole server
        log.error(e.msg)
        sys.exit("orzoj-server: {0}".format(e.msg))
def thread_sched_work():
    """Periodically hit the website's scheduled-work hook until the
    termination flag is set.

    A reply other than "0", or any exception, is treated as an error
    and logged; the loop then continues after _sched_interval seconds.
    """
    global _web_addr, _timeout, _sched_interval
    while not control.test_termination_flag():
        err = None
        try:
            reply = urllib2.urlopen(_web_addr + "?sched_work", None,
                                    _timeout).read()
            if reply != "0":
                err = reply
        except Exception as e:
            err = repr(e)
        if err is not None:
            log.error(
                "error while refreshing website scheduled jobs: {0}".format(
                    err))
        time.sleep(_sched_interval)
def accept(self, timeout=0):
    """return a tuple (conn, addr), where conn is a socket instance and
    addr is a string represeting the peer's address

    only available in server mode

    if timeout <= 0, it will block as long as necessary; otherwise it
    blocks at most @timeout seconds, and then raise Error"""
    if not self._is_server:
        return  # client-mode sockets cannot accept connections
    try:
        conn, addr = self._socket.accept(timeout)
        return (_socket_real(conn, False), addr)
    except _snc.error_timeout:
        raise ErrorTimeout
    except Exception as e:
        log.error("socket error: {0!r}".format(e))
        raise Error
def register_new_judge(judge, query_ans):
    """register a new judge. @judge should be structures.judge, and
    query_ans should be a dict

    data: action=register_new_judge, judge=...(id:str), lang_supported=...,
          query_ans=phpserialize.dumps(query_ans)
    return: id_num=... (numeric judge id)"""
    try:
        payload = {
            "action": "register_new_judge",
            "judge": judge.id,
            "lang_supported": list(judge.lang_supported),
            "query_ans": phpserialize.dumps(query_ans),
        }
        # the website replies with the numeric id assigned to this judge
        judge.id_num = int(_read(payload)["id_num"])
    except Exception as e:
        log.error("failed to register new judge: {0}".format(e))
        raise Error
def get_null_dev(for_writing=True):
    """
    get a file object pointing to the NULL device

    @for_writing: whether opens NULL device for writing (True) or
    reading (False)

    Raises SysError("limiter system error") if the device cannot be
    opened.
    """
    method = 'w' if for_writing else 'r'
    try:
        # os.devnull is 'nul' on Windows and '/dev/null' elsewhere,
        # which removes the manual conf.is_windows switch
        return open(os.devnull, method)
    except Exception as e:
        # bug fix: the message was previously passed as
        # log.error(fmt, e), so .format was never applied
        log.error("failed to open NULL device: {0}".format(e))
        raise SysError("limiter system error")
def func(score, fstdin, fstdout, fusrout):
    """Run the verifier on one test case and parse its verdict.

    Returns (score, message): score is None when the verifier failed to
    run or its output is unparsable; message is None when the verifier
    printed only a numeric score.  Closes over pcode, verifier_path,
    time and mem from the enclosing scope.
    """
    verifier_args = [str(score),
                     os.path.abspath(fstdin),
                     os.path.abspath(fstdout),
                     os.path.abspath(fusrout)]
    res = lang.verifier_execute(pcode, verifier_path, time, mem,
                                verifier_args)
    if res[0].exe_status != structures.EXESTS_NORMAL:
        return (None, "failed to execute verifier [status: {0}]: {1}".format(
            structures.EXECUTION_STATUS_STR[res[0].exe_status],
            res[0].extra_info))
    out = res[1]
    # expected verifier output: "<score> [optional message]"
    parts = out.split(' ', 1)
    try:
        verdict = int(parts[0])
    except Exception:
        log.error("[pcode {0!r}] verifier output is unrecognizable; original output of verifier: {1!r}".format(pcode, out))
        return (None, "unrecognizable verifier output")
    if len(parts) == 1:
        return (verdict, None)
    return (verdict, parts[1])
def run_judge():
    """Entry point of the judge client.

    Parses command-line options and the configuration file, daemonizes
    on unix unless disabled, then connects to the orzoj-server and
    hands the connection to the worker loop.
    """
    _parse_opt()
    global _options, _server_addr, _server_port, _pid_file
    conf.parse_file(_options.conf_file)
    if not _options.no_daemon and conf.is_unix:
        daemon.daemon_start()
    daemon.pid_start()
    try:
        s = snc.socket(_server_addr, _server_port)
    except snc.Error:
        # bug fix: previously execution fell through here and used the
        # undefined socket `s`, raising NameError on connection failure
        daemon.pid_end()
        return
    try:
        work.connect(s)
    except work.Error:
        log.error("error occurred, terminating program")
        daemon.pid_end()
def close(self):
    """Shut down the SSL layer (idempotent).

    Failures during unwrap are logged but never propagated, so close()
    is always safe to call.
    """
    try:
        if self._closed:
            return
        self._ssl.unwrap()
        self._closed = True
    except ssl.SSLError as e:
        log.error("SSLError: {0!r}".format(e))
    except socket.error as e:
        log.error("socket error: {0!r}".format(e))
    except socket.timeout:
        log.error("socket timeout")
def verifier_compile(self, pcode, fexe, src, extra_args=None):
    """return a tuple(success, info), for their meanings, refer to
    _Executor::run_as_compiler

    @fexe is the expected executable file path without extension
    @src is the source (string)
    @extra_args should be of type list if not None

    may raise Error if failed to write source file"""
    if not self._compiler:
        # no compiler configured: the verifier is interpreted, so the
        # "executable" is simply the source written out with _exe_ext
        try:
            fexe = fexe + self._exe_ext
            with open(fexe, "w") as f:
                f.write(src)
            return (True, None)
        except Exception as e:
            log.error("failed to write verifier source: {0}".format(e))
            raise Error
    srcpath = fexe + self._src_ext
    # fast path: if the cached source on disk matches, assume the
    # previously built executable is still valid and skip recompiling
    try:
        with open(srcpath, "r") as f:
            src_old = f.read()
            if src_old == src:
                return (True, None)
    except:
        # missing/unreadable cached source just forces a fresh compile
        pass
    try:
        with open(srcpath, "w") as f:
            f.write(src)
    except Exception as e:
        log.error("failed to write verifier source: {0}".format(e))
        raise Error
    # _cmd_vars is module-level shared state read by run_as_compiler
    _cmd_vars["MEMORY"] = 0
    _cmd_vars["DATADIR"] = os.path.abspath(pcode)
    ret = self._compiler.run_as_compiler(fexe, extra_args)
    if ret[0]:
        return ret
    # compilation failed: remove the cached source so the next attempt
    # cannot wrongly take the unchanged-source fast path above
    try:
        os.remove(srcpath)
        return ret
    except Exception as e:
        log.error("failed to remove file: {0}".format(e))
        raise Error
def write_all(self, data, timeout=0):
    """Write every byte of @data, retrying over partial SSL writes.

    Raises Error (after logging) on timeout, socket or SSL failure.
    """
    try:
        self._set_timeout(timeout)
        written = 0
        total = len(data)
        while written < total:
            written += self._ssl.write(data[written:])
    except socket.timeout:
        log.error("socket timeout")
        raise Error
    except socket.error as e:
        log.error("socket error: {0!r}".format(e))
        raise Error
    except ssl.SSLError as e:
        log.error("SSLError: {0!r}".format(e))
        raise Error
def read_all(self, count, timeout=0):
    """Read exactly @count bytes, looping over partial SSL reads.

    Raises Error (after logging) on timeout, socket or SSL failure.
    """
    try:
        self._set_timeout(timeout)
        buf = ''
        while len(buf) < count:
            buf += self._ssl.read(count - len(buf))
        return buf
    except socket.timeout:
        log.error("socket timeout")
        raise Error
    except socket.error as e:
        log.error("socket error: {0!r}".format(e))
        raise Error
    except ssl.SSLError as e:
        log.error("SSLError: {0!r}".format(e))
        raise Error
def __init__(self, sock, is_server=False):
    """Wrap @sock in a TLSv1 session with mutual certificate checks.

    Raises Error (after logging) when the handshake fails.
    NOTE(review): ssl.wrap_socket is deprecated/removed in modern
    Python (3.12+); migrating to SSLContext.wrap_socket should be
    confirmed against the project's target interpreter.
    """
    global _cert_file, _key_file, _ca_file
    try:
        # mark closed first so close() stays a no-op if the handshake fails
        self._closed = True
        self._ssl = ssl.wrap_socket(sock, keyfile=_key_file,
                certfile=_cert_file, ca_certs=_ca_file,
                server_side=is_server,
                cert_reqs=ssl.CERT_REQUIRED,  # peer must present a valid cert
                ssl_version=ssl.PROTOCOL_TLSv1)
        self._closed = False  # now successfully initialized
    except socket.timeout:
        log.error("socket timeout")
        raise Error
    except socket.error as e:
        log.error("socket error: {0!r}".format(e))
        raise Error
    except ssl.SSLError as e:
        log.error("SSLError: {0!r}".format(e))
        raise Error
def run(self, var_dict, stdin=None, stdout=None, stderr=None):
    """run the limiter under variables defined in @var_dict
    Note: @var_dict may be changed

    execution result can be accessed via self.exe_status, self.exe_time
    (in microseconds), self.exe_mem (in kb) and self.exe_extra_info

    if @stdout and/or @stderr is SAVE_OUTPUT, stdout and/or stderr will
    be stored in self.stdout and self.stderr
    """
    self.stdout = None
    self.stderr = None
    # the limiter reports results either through a temp file or a unix
    # socket, selected by self._type
    if self._type == _LIMITER_FILE:
        try:
            ftmp = tempfile.mkstemp()
            var_dict["FILENAME"] = ftmp[1]
        except Exception as e:
            log.error(
                "[limiter {0!r}] failed to create temporary file: {1}".
                format(self._name, e))
            raise SysError("limiter communication error")
    else:
        var_dict["SOCKNAME"] = self._socket_name
    try:
        # expand the configured argument template with var_dict
        args = eval_arg_list(self._args, var_dict)
    except Exception as e:
        log.error(
            "[limiter {0!r}] failed to evaluate argument: {1}".format(
                self._name, e))
        raise SysError("limiter configuration error")
    log.debug("executing command: {0!r}".format(args))
    try:
        # map SAVE_OUTPUT markers to pipes; anything else is passed through
        stdout_ = stdout
        if stdout_ is SAVE_OUTPUT:
            stdout_ = subprocess.PIPE
        stderr_ = stderr
        if stderr_ is SAVE_OUTPUT:
            stderr_ = subprocess.PIPE
        p = subprocess.Popen(args, stdin=stdin, stdout=stdout_,
                             stderr=stderr_)
    except OSError as e:
        log.error("error while calling Popen [errno {0}] "
                  "[filename {1!r}]: {2}".format(e.errno, e.filename,
                                                 e.strerror))
        raise SysError("failed to execute limiter")
    except Exception as e:
        log.error("error while calling Popen: {0}".format(e))
        raise SysError("failed to execute limiter")
    if self._type == _LIMITER_SOCKET:
        try:
            # the limiter connects back and sends a fixed 16-byte header:
            # four unsigned 32-bit ints (status, time, mem, info length)
            s = self._socket
            s.settimeout(1)
            (conn, addr) = s.accept()
            s.settimeout(None)
            (self.exe_status, self.exe_time, self.exe_mem, info_len) = \
                struct.unpack("IIII", conn.recv(16))
            if info_len:
                self.exe_extra_info = conn.recv(info_len)
            else:
                self.exe_extra_info = ''
        except socket.timeout:
            log.error("[limiter {0!r}] socket timed out".format(
                self._name))
            raise SysError("limiter socket error")
        except Exception as e:
            log.error(
                "[limiter {0!r}] failed to retrieve data through socket: {1}"
                .format(self._name, e))
            raise SysError("limiter socket error")
    if stdout is SAVE_OUTPUT or stderr is SAVE_OUTPUT:
        # communicate() also waits for the child to finish
        (self.stdout, self.stderr) = p.communicate()
    else:
        p.wait()
    log.debug('the command above now finished')
    if self._type == _LIMITER_FILE:
        try:
            # same 16-byte header layout as the socket variant, read
            # from the temp file instead
            with open(ftmp[1], 'rb') as f:
                (self.exe_status, self.exe_time, self.exe_mem, info_len) = \
                    struct.unpack("IIII", f.read(16))
                if info_len:
                    self.exe_extra_info = f.read(info_len)
                else:
                    self.exe_extra_info = ''
            os.close(ftmp[0])
            os.remove(ftmp[1])
        except Exception as e:
            log.error(
                "[limiter {0!r}] failed to retrieve data through file: {1}"
                .format(self._name, e))
            raise SysError("limiter file error")
    if self._type == _LIMITER_SOCKET:
        try:
            conn.close()
        except Exception as e:
            log.warning("failed to close socket connection: {0}".format(e))
def _solve_task(self):
    """Fetch one task from the global queue and drive the full judge
    protocol for it: data sync, compilation, per-case results and the
    final report.  Progress is forwarded to the website through a
    background _thread_web_communicate reporter thread.
    """
    judge = self._judge
    # thin wrappers over the judge connection for readable protocol code
    def _write_msg(m):
        msg.write_msg(self._snc, m)
    def _write_str(s):
        self._snc.write_str(s)
    def _write_uint32(v):
        self._snc.write_uint32(v)
    def _read_msg():
        return msg.read_msg(self._snc)
    def _read_str():
        return self._snc.read_str()
    def _read_uint32():
        return self._snc.read_uint32()
    def _check_msg(m):
        # expect exactly message @m next on the wire
        if m != _read_msg():
            log.warning("[judge {0!r} message check error".format(
                judge.id))
            raise _internal_error
    def _stop_web_report(tell_online=True):
        # stop the reporter thread; while waiting, optionally keep the
        # judge connection alive with TELL_ONLINE messages
        th_report.stop()
        if tell_online:
            while th_report.is_alive():
                th_report.join(msg.TELL_ONLINE_INTERVAL)
                _write_msg(msg.TELL_ONLINE)
        else:
            th_report.join()
    global _task_queue
    task = _task_queue.get(self._lang_id_set)
    if task is None:
        # no task for this judge's languages: just keep the link alive
        _write_msg(msg.TELL_ONLINE)
        time.sleep(msg.TELL_ONLINE_INTERVAL)
        return
    log.info("[judge {0!r}] received task #{1} for problem {2!r}".format(
        judge.id, task.id, task.prob))
    self._cur_task = task
    th_report = _thread_web_communicate()
    th_report.start()
    if not os.path.isdir(task.prob):
        # problem data directory missing locally: discard the task
        self._cur_task = None
        log.error("No data for problem {0!r}, task #{1} discarded".format(
            task.prob, task.id))
        th_report.report(web.report_no_data, [task])
        _stop_web_report()
        return
    # sync the problem data directory to the judge
    th_report.report(web.report_sync_data, [task, judge])
    _write_msg(msg.PREPARE_DATA)
    _write_str(task.prob)
    speed = sync_dir.send(task.prob, self._snc)
    if speed:
        log.info("[judge {0!r}] file transfer speed: {1!r} kb/s".format(
            judge.id, speed))
    m = _read_msg()
    if m == msg.DATA_ERROR:
        # judge rejected the problem data
        self._cur_task = None
        reason = _read_str()
        log.error(
            "[judge {0!r}] [task #{1}] [prob: {2!r}] data error:\n{3}".
            format(judge.id, task.id, task.prob, reason))
        th_report.report(web.report_error, [task, "data error"])
        _stop_web_report()
        return
    elif m != msg.DATA_OK:
        log.warning("[judge {0!r}] message check error".format(judge.id))
        th_report.report(web.report_error, [task, "message check error"])
        _stop_web_report(False)
        raise _internal_error
    ncase = _read_uint32()
    # hand the submission over and wait until the judge is ready
    _write_msg(msg.START_JUDGE)
    _write_str(task.lang)
    _write_str(task.src)
    _write_str(task.input)
    _write_str(task.output)
    while True:
        m = _read_msg()
        if m == msg.START_JUDGE_OK:
            break
        if m != msg.START_JUDGE_WAIT:
            log.warning("[judge {0!r}] message check error".format(
                judge.id))
            th_report.report(web.report_error, [task, "message check error"])
            _stop_web_report(False)
            raise _internal_error
    # compilation phase
    th_report.report(web.report_compiling, [task])
    while True:
        m = _read_msg()
        if m == msg.TELL_ONLINE:
            continue  # judge is still compiling
        if m == msg.COMPILE_SUCCEED:
            th_report.report(web.report_compile_success, [task, ncase])
            break
        else:
            if m != msg.COMPILE_FAIL:
                th_report.report(web.report_error,
                                 [task, "message check error"])
                log.warning("[judge {0!r}] message check error".format(
                    judge.id))
                _stop_web_report(False)
                raise _internal_error
            # compile error: forward the compiler output and finish
            self._cur_task = None
            th_report.report(web.report_compile_failure,
                             [task, _read_str()])
            _stop_web_report()
            return
    # per-case judging phase
    prob_res = list()
    for i in range(ncase):
        th_report.lazy_report(web.report_judge_progress, [task, i])
        while True:
            m = _read_msg()
            if m == msg.REPORT_CASE:
                break
            if m != msg.TELL_ONLINE:
                log.warning("[judge {0!r}] message check error".format(
                    judge.id))
                th_report.report(web.report_error,
                                 [task, "message check error"])
                _stop_web_report(False)
                raise _internal_error
        result = structures.case_result()
        result.read(self._snc)
        prob_res.append(result)
    th_report.clean_lazy()
    th_report.report(web.report_prob_result, [task, prob_res])
    _check_msg(msg.REPORT_JUDGE_FINISH)
    self._cur_task = None
    _stop_web_report()
    if th_report.check_error():
        log.warning(
            "[judge {0!r}] error while reporting judge results for task #{1}"
            .format(judge.id, task.id))
    else:
        log.info("[judge {0!r}] finished task #{1} normally".format(
            judge.id, task.id))