def run_case_output(revision, case_set, solution):
    """
    report: similar to generating cases, [{ }, { }, ... { }]
    """
    current_task = Task.objects.create(revision=revision,
                                       abstract="RUN OUTPUT, %d cases" % len(case_set))
    try:
        runner = Runner(solution)
        result = []
        failed = False
        for case in case_set:
            if case.output_lock:
                continue  # output content protected
            with UpdateManager(case, revision) as case:
                case.output_file.save("out_" + random_string(), ContentFile(b''), save=False)
                run_result = runner.run(stdin=case.input_file.path, stdout=case.output_file.path,
                                        max_time=revision.time_limit * 3 / 1000,
                                        max_memory=revision.memory_limit * 2)
                CaseManagementTools.reformat_file(case.output_file.path, revision.well_form_policy)
                case.save_fingerprint(revision.problem_id)
                with transaction.atomic():
                    case.save()
            result.append({
                "case_number": case.case_number,
                "success": run_result["verdict"] == "OK",
                "detail": run_result
            })
            if run_result["verdict"] != "OK":
                failed = True
            # persist partial results before moving on to the next case
            current_task.status = -2
            current_task.report = json.dumps(result)
            current_task.save()
    except CompileError as e:
        # Assumed handling: the solution failed to compile, so the task is marked
        # failed with the compiler message (the sibling tasks below expose e.error
        # the same way).
        current_task.status = -1
        current_task.report = json.dumps([{"success": False, "error": e.error}])
        current_task.save()
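# CaseManagementTools.reformat_file(path, policy), called above, is defined elsewhere
# in the project. The sketch below only illustrates what a "well form" pass typically
# does (normalize line endings, trim trailing spaces, end the file with exactly one
# newline); the project's actual policy may differ, and this helper is not used by
# the tasks in this module.
def example_well_form(file_path):
    """Rewrite file_path in place with normalized whitespace (illustration only)."""
    with open(file_path, "r", newline="") as f:
        lines = f.read().splitlines()
    cleaned = "\n".join(line.rstrip() for line in lines).rstrip("\n")
    with open(file_path, "w") as f:
        f.write(cleaned + "\n" if cleaned else "")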
def validate_case(revision, case_set, validator):
    """
    report: similar to generating cases, [{ }, { }, ... { }]
    """
    current_task = Task.objects.create(revision=revision,
                                       abstract="VALIDATE, %d cases" % len(case_set))
    try:
        runner = Runner(validator)
        result = []
        failed = False
        for case in case_set:
            output_path = path.join(runner.workspace, "out")
            error_path = path.join(runner.workspace, "err")
            log_path = path.join(runner.workspace, "log")
            # testlib-style validator arguments
            args = ["--testOverviewLogFileName", log_path]
            if revision.enable_group:
                args.extend(["--group", str(case.group)])
            if case.in_samples:
                args.extend(["--testset", "samples"])
            elif case.in_pretests:
                args.extend(["--testset", "pretests"])
            run_result = runner.run(args=args, stdin=case.input_file.path,
                                    stdout=output_path, stderr=error_path,
                                    max_time=revision.time_limit * 3 / 1000,
                                    max_memory=revision.memory_limit * 2)
            with transaction.atomic():
                result.append({
                    "case_number": case.case_number,
                    "success": run_result["verdict"] == "OK",
                    "comment": CaseManagementTools.read_abstract(output_path),
                    "stderr": CaseManagementTools.read_abstract(error_path),
                    "log": CaseManagementTools.read_abstract(log_path),
                    "exit_code": run_result["exit_code"]
                })
                if run_result["verdict"] != "OK":
                    failed = True
                # persist partial results before moving on to the next case
                current_task.status = -2
                current_task.report = json.dumps(result)
                current_task.save()
    except CompileError as e:
        # Assumed handling: the validator failed to compile, so the task is marked
        # failed with the compiler message (mirrors the other tasks in this module).
        current_task.status = -1
        current_task.report = json.dumps([{"success": False, "error": e.error}])
        current_task.save()
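# CaseManagementTools.read_abstract(path) is used above to attach short previews of
# the validator's stdout / stderr / log to the report. Its real implementation lives
# elsewhere; the sketch below is a minimal stand-in under the assumption that it
# truncates long files to a bounded preview.
def example_read_abstract(file_path, max_length=256):
    """Return up to max_length characters of the file, marking truncation (illustration only)."""
    try:
        with open(file_path, "r", errors="replace") as f:
            content = f.read(max_length + 1)
    except OSError:
        return ""
    if len(content) > max_length:
        return content[:max_length] + "..."
    return content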
def test_compile(self):
    # a valid C++ program compiles on Runner construction
    program = Program(name="hello", lang="cpp", code="int main() { return 0; }",
                      tag="solution_main_correct")
    runner = Runner(program)
    # missing semicolon: construction should raise CompileError
    with self.assertRaises(CompileError):
        program = Program(name="hello", lang="cpp", code="int main() { return 0 }",
                          tag="solution_main_correct")
        runner = Runner(program)
    # an empty Java class still compiles
    program = Program(name="hello", lang="java", code="class Main { }",
                      tag="solution_main_correct")
    runner = Runner(program)
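# A follow-up check in the same style as test_compile. It is only a sketch: it assumes
# CompileError carries the compiler output on an `error` attribute, as the polygon
# tasks above and below read it via `e.error`.
def test_compile_error_message(self):
    program = Program(name="broken", lang="cpp", code="int main() { return 0 }",
                      tag="solution_main_correct")
    with self.assertRaises(CompileError) as caught:
        Runner(program)
    # the compiler should have produced some message
    self.assertTrue(caught.exception.error)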
def generate_cases(revision, commands):
    """
    report: [
      {
        success: True / False
        error: ...
        case_number: 1
        detail: ...
      },
      { ... },
      ...
    ]
    """
    generators = {}
    current_task = Task.objects.create(revision=revision, abstract="GENERATE CASES")
    report = []
    for command_string in commands:
        ret = {"command": command_string}
        command = command_string.split()
        program_name, program_args = command[0], command[1:]
        try:
            if program_name not in generators:
                program = revision.programs.get(name=program_name, tag="generator")
                generators[program_name] = Runner(program)
            elif isinstance(generators[program_name], CompileError):
                raise generators[program_name]
            runner = generators[program_name]
            if revision.cases.all().count():
                case_number = revision.cases.all().aggregate(Max("case_number"))["case_number__max"] + 1
            else:
                case_number = 1
            new_case = Case(create_time=datetime.now(),
                            description="Gen \"%s\"" % command_string,
                            case_number=case_number)
            new_case.input_file.save("in_" + random_string(), ContentFile(b""), save=False)
            new_case.output_file.save("out_" + random_string(), ContentFile(b""), save=False)
            running_result = runner.run(args=program_args, stdout=new_case.input_file.path,
                                        max_time=revision.time_limit * 5 / 1000,
                                        max_memory=revision.memory_limit * 3)
            CaseManagementTools.reformat_file(new_case.input_file.path, revision.well_form_policy)
            new_case.save_fingerprint(revision.problem_id)
            ret["case_number"] = case_number
            with transaction.atomic():
                new_case.save()
                revision.cases.add(new_case)
            ret.update(case_number=case_number,
                       success=running_result["verdict"] == "OK",
                       detail=running_result,
                       generated=new_case.input_preview)
        except (Program.MultipleObjectsReturned, Program.DoesNotExist):
            ret.update(success=False,
                       error="There should be exactly one program tagged 'generator' that fits the command.")
        except CompileError as e:
            # remember the failure so later commands using the same generator fail fast
            generators[program_name] = e
            ret.update(success=False, error=e.error)
        report.append(ret)
        current_task.status = -2
        current_task.report = json.dumps(report)

    current_task.status = 0 if all(map(lambda r: r["success"], report)) else -1
    current_task.save()
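# Case.save_fingerprint(problem_id), used above and in run_case_output, is defined on
# the Case model elsewhere. The sketch below only illustrates the idea under the
# assumption that the fingerprint is a hash of the problem id plus the case files, so
# that identical cases can be recognized within a revision; the model's real method
# may compute it differently.
import hashlib

def example_case_fingerprint(problem_id, input_path, output_path):
    """Hash problem id + case contents into a hex fingerprint (illustration only)."""
    digest = hashlib.sha256()
    digest.update(str(problem_id).encode())
    for p in (input_path, output_path):
        with open(p, "rb") as f:
            digest.update(f.read())
    return digest.hexdigest()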
def test_run(self):
    code = """a, b = map(int, input().split())
print(a + b)"""
    code_time_limit = """int main() {
    int p = 999999999;
    int a = 0;
    while (1) { a++; }
}"""
    # a correct Python A+B solution
    program = Program(name="hello", lang="python", code=code, tag="solution_main_correct")
    runner = Runner(program)
    self.assertEqual(runner.workspace, os.getcwd())
    with open("1.in", "w") as f:
        f.write("1 2")
    result = runner.run(stdin="1.in", stdout="1.out", stderr="1.err")
    with open("1.out", "r") as f:
        self.assertEqual(f.read().strip(), "3")
    print(result)
    # a C++ program that spins forever and should hit the time limit
    program = Program(name="hello2", lang="cpp", code=code_time_limit, tag="solution_main_correct")
    runner = Runner(program)
    self.assertEqual(runner.workspace, os.getcwd())
    result = runner.run(stderr="runtime.log")
    print(result)
    self.assertEqual(result["verdict"], "TIME_LIMIT")
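# Another check in the style of test_run. The polygon tasks in this module read
# "verdict", "exit_code", "time" and "memory" off the dict returned by Runner.run, so
# a cheap sanity assertion on those keys is useful. This is a sketch: the result may
# contain additional fields, and the program name and file names below are arbitrary.
def test_run_result_keys(self):
    program = Program(name="echo", lang="python", code="print(input())",
                      tag="solution_main_correct")
    runner = Runner(program)
    with open("2.in", "w") as f:
        f.write("hello")
    result = runner.run(stdin="2.in", stdout="2.out", stderr="2.err")
    for key in ("verdict", "exit_code", "time", "memory"):
        self.assertIn(key, result)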
def check_case(revision, case_set, solution_set, checker):
    """
    response: {
        "success": True / False,
        "error": ...,
        "tasks": [
            {
                "verdict": "OK",
                "solution": 23,
                "case_number": 45,
                "time": 15,
                "memory": 30,
            },
            ...
        ],
        "summary": { solution_number: { time, sum_time, memory, points } }
    }
    """
    current_task = Task.objects.create(
        revision=revision,
        abstract="CHECK, %d cases, %d solutions" % (len(case_set), len(solution_set)))
    packed_result = {"success": True, "tasks": [], "summary": {}}
    try:
        solution_runners = [(solution, Runner(solution)) for solution in solution_set]
        if checker is None:
            checker = CaseManagementTools.obtain_defaultspj()
        checker_runner = Runner(checker)
        checker_result_path = path.join(checker_runner.workspace, "result")
        task_result = packed_result["tasks"]
        verdict_for_each_solution = {solution.id: set() for solution in solution_set}

        for case in case_set:
            for solution, runner in solution_runners:
                output_path = path.join(runner.workspace, "out")
                err_path = path.join(runner.workspace, "err")
                result = {
                    "solution": solution.id,
                    "case_number": case.case_number,
                    "case_id": case.id
                }
                running_result = runner.run(stdin=case.input_file.path, stdout=output_path,
                                            max_time=revision.time_limit / 1000,
                                            max_memory=revision.memory_limit)
                result.update(running_result)
                result.update(input=CaseManagementTools.read_abstract(case.input_file.path),
                              answer=CaseManagementTools.read_abstract(case.output_file.path),
                              output=CaseManagementTools.read_abstract(output_path),
                              stderr=CaseManagementTools.read_abstract(err_path))
                if result["verdict"] == "OK":
                    # run checker
                    checking_result = checker_runner.run(
                        args=[case.input_file.path, output_path, case.output_file.path],
                        stdout=checker_result_path,
                        max_time=revision.time_limit / 1000 * 3,
                        max_memory=revision.memory_limit)
                    result.update(checker_comment=CaseManagementTools.read_abstract(checker_result_path),
                                  checker_exit_code=checking_result["exit_code"])
                    if checking_result["verdict"] != "OK":
                        result.update(verdict="WRONG_ANSWER")
                if result["verdict"] == "OK":
                    result.update(points=case.points)
                else:
                    result.update(points=0)
                result.update(total_points=case.points)
                verdict_for_each_solution[solution.id].add(result["verdict"])
                task_result.append(result)

        for solution in solution_set:
            got_verdicts = verdict_for_each_solution[solution.id]
            if solution.tag in ('solution_main', 'solution_correct') and got_verdicts != {"OK"}:
                packed_result.update(success=False,
                                     error="'%s' claims to be correct, but got rejected in tests" % solution.name)
            if solution.tag == 'solution_tle_or_ok' and got_verdicts != {"TIME_LIMIT", "OK"}:
                packed_result.update(success=False,
                                     error="'%s' claims to be tle_or_ok, but got %s" % (solution.name, str(got_verdicts)))
            if solution.tag == 'solution_wa' and 'WRONG_ANSWER' not in got_verdicts:
                packed_result.update(success=False,
                                     error="'%s' claims to be WA, but never got WA" % solution.name)
            if solution.tag == 'solution_incorrect' and got_verdicts == {"OK"}:
                packed_result.update(success=False,
                                     error="'%s' claims to be incorrect, but is actually correct" % solution.name)
            if solution.tag == 'solution_fail' and "RUNTIME_ERROR" not in got_verdicts:
                packed_result.update(success=False,
                                     error="'%s' claims to fail, but didn't fail" % solution.name)
            solution_based_result = list(filter(lambda x: x["solution"] == solution.id, task_result))
            solution_time_summary = list(map(lambda x: x["time"], solution_based_result)) + [0]
            packed_result["summary"][solution.id] = {
                "time": max(solution_time_summary),
                "sum_time": sum(solution_time_summary),
                "memory": max(list(map(lambda x: x["memory"], solution_based_result)) + [0]),
                "points": sum(list(map(lambda x: x["points"], solution_based_result)) + [0]) /
                          max(sum(list(map(lambda x: x["total_points"], solution_based_result)) + [0]), 1) * 100
            }
            # persist the partial report after each solution is summarized
            current_task.status = -2
            current_task.report = json.dumps(packed_result)
            current_task.save()
    except CompileError as e:
        packed_result.update(success=False, error=e.error)
    except ValueError as e:
        packed_result.update(success=True, error=e.args[0])
    current_task.status = 0 if packed_result["success"] else -1
    current_task.report = json.dumps(packed_result)
    current_task.save()