def test_stdchal(self):
    '''Test g++, I/O redirect special judge.'''
    # One test case, judged four times.
    case = {
        'in': 'tests/testdata/in.txt',
        'ans': 'tests/testdata/ans.txt',
        'timelimit': 10000,
        'memlimit': 256 * 1024 * 1024,
    }
    # File-descriptor redirection plan for the tested program and the checker.
    redir_meta = {
        'redir_test': {
            "testin": 0,
            "testout": -1,
            "pipein": -1,
            "pipeout": 1,
        },
        'redir_check': {
            "testin": -1,
            "ansin": 2,
            "pipein": -1,
            "pipeout": 0,
        },
    }
    chal = StdChal(1, 'tests/testdata/test.cpp', 'g++', 'ioredir',
                   'tests/testdata/res', [case] * 4, redir_meta)
    result_list, verdict = yield chal.start()
    self.assertEqual(len(result_list), 4)
    for _, _, status in result_list:
        self.assertEqual(status, STATUS_AC)
def __init__(self, *args):
    '''Bring up the judge subsystems and the socket server before the
    parent constructor runs.'''
    for subsystem in (Privilege, PyExt, StdChal):
        subsystem.init()
    IOLoop.configure(EvIOLoop)
    Server.init_socket_server()
    super().__init__(*args)
def main():
    '''Main function.

    Initializes the judge subsystems, starts the socket server, and runs
    the event loop forever.
    '''
    for subsystem in (Privilege, PyExt, StdChal):
        subsystem.init()
    IOLoop.configure(EvIOLoop)
    init_socket_server()
    IOLoop.instance().start()
def main():
    '''Main function.

    Initializes the judge subsystems, starts the websocket server, and
    runs the event loop forever.
    '''
    for subsystem in (Privilege, PyExt, StdChal):
        subsystem.init()
    IOLoop.configure(EvIOLoop)
    init_websocket_server()
    IOLoop.instance().start()
def main():
    '''Main function.

    Initializes the judge subsystems, binds the /judge websocket endpoint
    on port 2501, and runs the event loop forever.
    '''
    for subsystem in (Privilege, PyExt, StdChal):
        subsystem.init()
    IOLoop.configure(EvIOLoop)
    routes = [
        (r'/judge', JudgeHandler),
    ]
    app = Application(routes)
    app.listen(2501)
    IOLoop.instance().start()
def test_stdchal(self):
    '''Test g++, A + B problems.'''
    case = {
        'in': 'tests/testdata/in.txt',
        'ans': 'tests/testdata/ans.txt',
        'timelimit': 10000,
        'memlimit': 256 * 1024 * 1024,
    }
    chal = StdChal(1, 'tests/testdata/test.cpp', 'g++', 'tests/testdata',
                   [case] * 4)
    result_list = yield chal.start()
    for _, _, status in result_list:
        # status 1 — presumably the accepted verdict; confirm against the
        # judge's STATUS_* constants.
        self.assertEqual(status, 1)
def test_stdchal(self):
    '''Test g++, A + B problems.'''
    tests = [{
        'in': 'tests/testdata/in.txt',
        'ans': 'tests/testdata/ans.txt',
        'timelimit': 10000,
        'memlimit': 256 * 1024 * 1024,
    }] * 4
    chal = StdChal(1, 'tests/testdata/test.cpp', 'g++', 'tests/testdata',
                   tests)
    result_list = yield chal.start()
    for result in result_list:
        _, _, status = result
        # status 1 — presumably the accepted verdict; confirm against the
        # judge's STATUS_* constants.
        self.assertEqual(status, 1)
def test_stdchal(self):
    '''Test g++, A + B problems.'''
    case = {
        'in': 'tests/testdata/res/testdata/0.in',
        'ans': 'tests/testdata/res/testdata/0.out',
        'timelimit': 10000,
        'memlimit': 256 * 1024 * 1024,
    }
    chal = StdChal(1, 'tests/testdata/test.cpp', 'g++', 'diff',
                   'tests/testdata/res', [case] * 4, {})
    result_list = yield chal.start()
    self.assertEqual(len(result_list), 4)
    for _, _, status, _ in result_list:
        self.assertEqual(status, STATUS_AC)
def __init__(self, *args):
    '''Bring up the judge subsystems before the parent constructor runs.'''
    for subsystem in (Privilege, PyExt, StdChal):
        subsystem.init()
    super().__init__(*args)
def start_chal(obj, websk):
    '''Start a challenge.

    Check the challenge config, issue judge tasks, then report the result.

    Args:
        obj (dict): Challenge config.
        websk (WebSocketHandler): Websocket object.

    Returns:
        None

    '''
    try:
        chal_id = obj['chal_id']
        # Remap the exported paths onto the local NFS mount.
        code_path = '/srv/nfs' + obj['code_path'][4:]
        test_list = obj['testl']
        res_path = '/srv/nfs' + obj['res_path'][4:]

        test_paramlist = list()
        comp_type = test_list[0]['comp_type']
        assert comp_type in ['g++', 'clang++', 'makefile', 'python3']

        # Expand each test into one judge task per data file. All tests must
        # share one compiler and this version only supports diff checking.
        for test in test_list:
            assert test['comp_type'] == comp_type
            assert test['check_type'] == 'diff'
            memlimit = test['memlimit']
            timelimit = test['timelimit']
            for data_id in test['metadata']['data']:
                test_paramlist.append({
                    'in': res_path + '/testdata/%d.in' % data_id,
                    'ans': res_path + '/testdata/%d.out' % data_id,
                    'timelimit': timelimit,
                    'memlimit': memlimit,
                })

        chal = StdChal(chal_id, code_path, comp_type, res_path,
                       test_paramlist)
        result_list = yield chal.start()

        # Fold per-data results back into one report per test; the worst
        # (highest) status wins for the whole test.
        idx = 0
        for test in test_list:
            total_runtime = 0
            total_mem = 0
            total_status = 0
            for _ in test['metadata']['data']:
                runtime, peakmem, status = result_list[idx]
                total_runtime += runtime
                total_mem += peakmem
                total_status = max(total_status, status)
                idx += 1
            websk.write_message(json.dumps({
                'chal_id': chal_id,
                'test_idx': test['test_idx'],
                'state': total_status,
                'runtime': total_runtime,
                'memory': total_mem,
            }))
    finally:
        # BUG FIX: release the running-challenge slot even when config
        # parsing or judging raises; previously an exception skipped the
        # decrement and permanently leaked a dispatcher slot. try/finally
        # (without except) keeps exception propagation unchanged.
        JudgeHandler.chal_running_count -= 1
        JudgeHandler.emit_chal()
def test_stdchal(self):
    '''Test g++, I/O redirect special judge.'''

    def make_case(in_path, ans_path):
        '''Build one test-parameter dict (fresh object per call).'''
        return {
            'in': in_path,
            'ans': ans_path,
            'timelimit': 10000,
            'memlimit': 256 * 1024 * 1024,
        }

    def make_redir_meta():
        '''Redirection plan wiring the program and checker through pipes
        (fresh object per call).'''
        return {
            'redir_test': {
                "testin": 0,
                "testout": -1,
                "pipein": -1,
                "pipeout": 1,
            },
            'redir_check': {
                "testin": -1,
                "ansin": 2,
                "pipein": -1,
                "pipeout": 0,
            },
        }

    # Accepted submission: all four runs pass the checker.
    case = make_case('tests/testdata/res/testdata/0.in',
                     'tests/testdata/res/testdata/0.out')
    chal = StdChal(1, 'tests/testdata/test.cpp', 'g++', 'ioredir',
                   'tests/testdata/res', [case] * 4, make_redir_meta())
    result_list = yield chal.start()
    self.assertEqual(len(result_list), 4)
    for _, _, status, verdict in result_list:
        self.assertEqual(status, STATUS_AC)
        self.assertEqual(verdict, 'Passed\n')

    # Wrong answer: the checker reports the differing values.
    case = make_case('tests/testdata/res/testdata/0.in',
                     'tests/testdata/res/testdata/0.out')
    chal = StdChal(2, 'tests/testdata/testwa.cpp', 'g++', 'ioredir',
                   'tests/testdata/res', [case], make_redir_meta())
    result_list = yield chal.start()
    self.assertEqual(len(result_list), 1)
    _, _, status, verdict = result_list[0]
    self.assertEqual(status, STATUS_WA)
    self.assertEqual(verdict, 'Diff\n12\n\n7\n\n')

    # Interactive judge: program and checker talk over pipes, no data files.
    interactive_meta = {
        'redir_test': {
            "testin": -1,
            "testout": -1,
            "pipein": 0,
            "pipeout": 1,
        },
        'redir_check': {
            "testin": -1,
            "ansin": -1,
            "pipein": 1,
            "pipeout": 0,
        },
    }
    chal = StdChal(3, 'tests/testdata/res_act/test.cpp', 'g++', 'ioredir',
                   'tests/testdata/res_act',
                   [make_case(None, None)], interactive_meta)
    result_list = yield chal.start()
    self.assertEqual(len(result_list), 1)
    _, _, status, verdict = result_list[0]
    self.assertEqual(status, STATUS_AC)
def start_chal(obj, callback):
    '''Start a challenge.

    Check the challenge config, issue judge tasks, then report the result.

    Args:
        obj (dict): Challenge config.
        callback: Challenge callback.

    Returns:
        None

    '''
    # The worst exception, there is no chal_id in the obj.
    chal_id = None
    try:
        chal_id = obj['chal_id']
        code_path = obj['code_path']
        res_path = obj['res_path']
        test_list = obj['test']
        metadata = obj['metadata']
        comp_type = obj['comp_type']
        check_type = obj['check_type']

        assert comp_type in ['g++', 'clang++', 'makefile', 'python3']
        assert check_type in ['diff', 'ioredir']

        # Expand each test into one judge task per data file.
        test_paramlist = list()
        for test in test_list:
            for data_id in test['metadata']['data']:
                test_paramlist.append({
                    'in': res_path + '/testdata/%d.in' % data_id,
                    'ans': res_path + '/testdata/%d.out' % data_id,
                    'timelimit': test['timelimit'],
                    'memlimit': test['memlimit'],
                })

        chal = StdChal(chal_id, code_path, comp_type, check_type,
                       res_path, test_paramlist, metadata)
        result_list = yield chal.start()

        # Fold per-data results back into one summary per test; the worst
        # (highest) status wins for the whole test.
        result = []
        idx = 0
        for test in test_list:
            total_runtime = 0
            total_mem = 0
            total_status = 0
            subverdicts = list()
            for _ in test['metadata']['data']:
                runtime, peakmem, status, subverdict = result_list[idx]
                total_runtime += runtime
                total_mem += peakmem
                total_status = max(total_status, status)
                subverdicts.append(subverdict)
                idx += 1
            result.append({
                'test_idx': test['test_idx'],
                'state': total_status,
                'runtime': total_runtime,
                'peakmem': total_mem,
                'verdict': subverdicts,
            })

        callback({
            'chal_id': chal_id,
            'result': result,
        })
    except Exception:
        # Report the failure to the caller instead of dropping it silently.
        traceback.print_exception(*sys.exc_info())
        callback({
            'chal_id': chal_id,
            'verdict': None,
            'result': None,
        })
    finally:
        # Always release the running-challenge slot.
        JudgeDispatcher.chal_running_count -= 1
        JudgeDispatcher.emit_chal()
def start_chal(obj, callback):
    """Start a challenge.

    Check the challenge config, issue judge tasks, then report the result.

    Args:
        obj (dict): Challenge config.
        callback: Challenge callback.

    Returns:
        None

    """
    # The worst exception, there is no chal_id in the obj.
    chal_id = None
    try:
        chal_id = obj["chal_id"]
        code_path = obj["code_path"]
        res_path = obj["res_path"]
        test_list = obj["test"]
        metadata = obj["metadata"]
        comp_type = obj["comp_type"]
        check_type = obj["check_type"]

        assert comp_type in ["g++", "clang++", "makefile", "python3"]
        assert check_type in ["diff", "ioredir"]

        # One judge task per data file, in test order.
        test_paramlist = [
            {
                "in": res_path + "/testdata/%d.in" % data_id,
                "ans": res_path + "/testdata/%d.out" % data_id,
                "timelimit": test["timelimit"],
                "memlimit": test["memlimit"],
            }
            for test in test_list
            for data_id in test["metadata"]["data"]
        ]

        chal = StdChal(chal_id, code_path, comp_type, check_type, res_path, test_paramlist, metadata)
        result_list = yield chal.start()

        # Consume the flat result stream back into one summary per test;
        # the worst (highest) status wins for the whole test.
        results = iter(result_list)
        result = []
        for test in test_list:
            total_runtime = 0
            total_mem = 0
            total_status = 0
            subverdicts = []
            for _ in test["metadata"]["data"]:
                runtime, peakmem, status, subverdict = next(results)
                total_runtime += runtime
                total_mem += peakmem
                total_status = max(total_status, status)
                subverdicts.append(subverdict)
            result.append(
                {
                    "test_idx": test["test_idx"],
                    "state": total_status,
                    "runtime": total_runtime,
                    "peakmem": total_mem,
                    "verdict": subverdicts,
                }
            )

        callback({"chal_id": chal_id, "result": result})
    except Exception:
        # Report the failure to the caller instead of dropping it silently.
        traceback.print_exception(*sys.exc_info())
        callback({"chal_id": chal_id, "verdict": None, "result": None})
    finally:
        # Always release the running-challenge slot.
        JudgeDispatcher.chal_running_count -= 1
        JudgeDispatcher.emit_chal()
def start_chal(obj, websk):
    '''Start a challenge.

    Check the challenge config, issue judge tasks, then report the result.

    Args:
        obj (dict): Challenge config.
        websk (WebSocketHandler): Websocket object.

    Returns:
        None

    '''
    # The worst exception, there is no chal_id in the obj.
    chal_id = None
    try:
        chal_id = obj['chal_id']
        code_path = obj['code_path']
        res_path = obj['res_path']
        test_list = obj['test']
        metadata = obj['metadata']
        comp_type = obj['comp_type']
        check_type = obj['check_type']

        assert comp_type in ['g++', 'clang++', 'makefile', 'python3']
        assert check_type in ['diff', 'ioredir']

        # Expand each test into one judge task per data file.
        test_paramlist = list()
        for test in test_list:
            for data_id in test['metadata']['data']:
                test_paramlist.append({
                    'in': res_path + '/testdata/%d.in' % data_id,
                    'ans': res_path + '/testdata/%d.out' % data_id,
                    'timelimit': test['timelimit'],
                    'memlimit': test['memlimit'],
                })

        chal = StdChal(chal_id, code_path, comp_type, check_type,
                       res_path, test_paramlist, metadata)
        result_list, verdict = yield chal.start()

        # Fold per-data results back into one summary per test; the worst
        # (highest) status wins for the whole test.
        result = []
        idx = 0
        for test in test_list:
            total_runtime = 0
            total_mem = 0
            total_status = 0
            for _ in test['metadata']['data']:
                runtime, peakmem, status = result_list[idx]
                total_runtime += runtime
                total_mem += peakmem
                total_status = max(total_status, status)
                idx += 1
            result.append({
                'test_idx': test['test_idx'],
                'state': total_status,
                'runtime': total_runtime,
                'peakmem': total_mem,
                'verdict': ''
            })

        websk.write_message(json.dumps({
            'chal_id': chal_id,
            'verdict': verdict,
            'result': result,
        }))
    except Exception:
        # Report the failure to the caller instead of dropping it silently.
        traceback.print_exception(*sys.exc_info())
        websk.write_message(json.dumps({
            'chal_id': chal_id,
            'verdict': None,
            'result': None,
        }))
    finally:
        # Always release the running-challenge slot.
        JudgeHandler.chal_running_count -= 1
        JudgeHandler.emit_chal()
def start_chal(obj, websk):
    '''Start a challenge.

    Check the challenge config, issue judge tasks, then report the result.

    Args:
        obj (dict): Challenge config.
        websk (WebSocketHandler): Websocket object.

    Returns:
        None

    '''
    try:
        chal_id = obj['chal_id']
        # Remap the exported paths onto the local NFS mount.
        code_path = '/srv/nfs' + obj['code_path'][4:]
        test_list = obj['testl']
        res_path = '/srv/nfs' + obj['res_path'][4:]

        test_paramlist = list()
        comp_type = test_list[0]['comp_type']
        assert comp_type in ['g++', 'clang++', 'makefile', 'python3']

        # Expand each test into one judge task per data file. All tests must
        # share one compiler and this version only supports diff checking.
        for test in test_list:
            assert test['comp_type'] == comp_type
            assert test['check_type'] == 'diff'
            memlimit = test['memlimit']
            timelimit = test['timelimit']
            for data_id in test['metadata']['data']:
                test_paramlist.append({
                    'in': res_path + '/testdata/%d.in' % data_id,
                    'ans': res_path + '/testdata/%d.out' % data_id,
                    'timelimit': timelimit,
                    'memlimit': memlimit,
                })

        chal = StdChal(chal_id, code_path, comp_type, res_path,
                       test_paramlist)
        result_list = yield chal.start()

        # Fold per-data results back into one report per test; the worst
        # (highest) status wins for the whole test.
        idx = 0
        for test in test_list:
            total_runtime = 0
            total_mem = 0
            total_status = 0
            for _ in test['metadata']['data']:
                runtime, peakmem, status = result_list[idx]
                total_runtime += runtime
                total_mem += peakmem
                total_status = max(total_status, status)
                idx += 1
            websk.write_message(json.dumps({
                'chal_id': chal_id,
                'test_idx': test['test_idx'],
                'state': total_status,
                'runtime': total_runtime,
                'memory': total_mem,
            }))
    finally:
        # BUG FIX: release the running-challenge slot even when config
        # parsing or judging raises; previously an exception skipped the
        # decrement and permanently leaked a dispatcher slot. try/finally
        # (without except) keeps exception propagation unchanged.
        JudgeHandler.chal_running_count -= 1
        JudgeHandler.emit_chal()