def _generate_input_file(self):
    """
    In case the input is not static, generates the input using the
    generation command.

    Outcome is recorded on the instance: ``input_generation_log`` and
    ``input_generation_successful`` are always set (except for the static
    early-return), and ``_input_generated_file`` is set on success.
    ``self.save()`` persists the result.
    """
    # Static inputs are uploaded, not generated — nothing to do.
    if self.input_static:
        return
    if self._input_generator_name is None:
        logger.error("A testcase has neither a generator nor a static input")
        self.input_generation_log = "Generation failed. No generator specified."
        self.input_generation_successful = False
    elif self._input_generator is None:
        self.input_generation_log = "Generation failed. Generator {} not found".format(
            self._input_generator_name,
        )
        self.input_generation_successful = False
    else:
        generation_command = get_execution_command(
            self._input_generator.source_language, "generator")
        # Extra generator arguments are stored as a shell-style string.
        generation_command.extend(shlex.split(self._input_generation_parameters))
        stdout_redirect = "output.txt"
        try:
            # Accessing compiled_file may itself fail when the generator
            # does not compile (see last_compile_log below).
            generator_compiled = self._input_generator.compiled_file
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # still propagate.
            self.input_generation_log = "Generation failed. Generator didn't compile. Log: {}".format(
                self._input_generator.last_compile_log
            )
            self.save()
            return
        action = ActionDescription(
            commands=[generation_command],
            executables=[("generator", generator_compiled)],
            stdout_redirect=stdout_redirect,
            output_files=[stdout_redirect],
            # Failsafe limits guard against runaway generators.
            time_limit=settings.FAILSAFE_TIME_LIMIT,
            memory_limit=settings.FAILSAFE_MEMORY_LIMIT
        )
        success, execution_success, outputs, sandbox_datas = execute_with_input(action)
        if not success:
            # Sandbox-level failure: a system problem, not a generator bug.
            logger.error("Generating input for testcase {} failed.\n Sandbox data:\n{}".format(
                str(self),
                str(sandbox_datas[0]))
            )
            self.input_generation_log = \
                "System failed to generate the input. " \
                "Check the logs for more details. " \
                "This issue must be resolved by a system administrator"
            self.input_generation_successful = False
        elif not execution_success:
            # The generator itself crashed / exceeded limits.
            self.input_generation_log = "Generation failed. {}.".format(
                str(sandbox_datas[0])
            )
            self.input_generation_successful = False
        else:
            self._input_generated_file = outputs[stdout_redirect]
            self.input_generation_log = "Generation successful."
            self.input_generation_successful = True
    self.save()
def run_checker(source_file, input_file, jury_output, contestant_output):
    """
    Runs compiled executable of checker source file with the parameters:
    checker <testcase_input> <testcase_output> <contestant_output>

    The checker should output the score to standard output.
    The first line of standard error stream is the message shown to the
    contestant.

    Returns a 6-tuple:
    (ok, score, contestant_comment, stdout_file, stderr_file, message)
    """
    CHECKER_FILENAME = "checker"
    TESTCASE_INPUT_FILENAME = "input.txt"
    TESTCASE_OUTPUT_FILENAME = "jury.txt"
    CONTESTANT_OUTPUT_FILENAME = "contestant.txt"
    STDOUT_FILENAME = "stdout.txt"
    STDERR_FILENAME = "stderr.txt"

    compiled_checker = source_file.compiled_file
    if compiled_checker is None:
        # Checker never compiled — nothing to run.
        return False, None, None, None, None, None

    run_cmd = get_execution_command(source_file.source_language, CHECKER_FILENAME)
    run_cmd += [TESTCASE_INPUT_FILENAME,
                TESTCASE_OUTPUT_FILENAME,
                CONTESTANT_OUTPUT_FILENAME]

    sandbox_action = ActionDescription(
        commands=[run_cmd],
        executables=[(CHECKER_FILENAME, compiled_checker)],
        files=[
            (TESTCASE_INPUT_FILENAME, input_file),
            (TESTCASE_OUTPUT_FILENAME, jury_output),
            (CONTESTANT_OUTPUT_FILENAME, contestant_output)
        ],
        stdout_redirect=STDOUT_FILENAME,
        stderr_redirect=STDERR_FILENAME,
        output_files=[STDOUT_FILENAME, STDERR_FILENAME],
        time_limit=getattr(settings, "FAILSAFE_TIME_LIMIT", None),
        memory_limit=getattr(settings, "FAILSAFE_MEMORY_LIMIT", None)
    )
    success, execution_success, output_files, sandbox_datas = \
        execute_with_input(sandbox_action)

    if not (success and execution_success):
        # Sandbox failure or checker crash: no files, no score.
        return False, None, None, None, None, None

    stdout_file = output_files[STDOUT_FILENAME]
    stderr_file = output_files[STDERR_FILENAME]

    # First line of checker stdout must be a single number (the score).
    try:
        score = float(stdout_file.file.readline().strip())
    except ValueError:
        message = "First line of output must contain a single number, the score."
        return False, None, None, stdout_file, stderr_file, message

    if not (0 <= score <= 1):
        message = "Score must be between 0 and 1."
        return False, None, None, stdout_file, stderr_file, message

    message = "Scored successfully."
    # First stderr line is the comment shown to the contestant.
    contestant_comment = str(stderr_file.file.readline())
    return True, score, contestant_comment, stdout_file, stderr_file, message
def _run(self):
    """
    Runs the validator on this testcase's input inside the sandbox and
    records the verdict on the instance (``valid``, ``validation_message``,
    ``exit_status``, ``executed``), then saves.
    """
    # TODO: Make sure testcase has already been generated
    validation_command = get_execution_command(
        self.validator.source_language, "validator")
    validation_command.append("input.txt")
    validator_compiled_file = self.validator.compiled_file
    if validator_compiled_file is None:
        self.validation_message = "Validation failed. Validator didn't compile"
        self.valid = False
        self.executed = True
        self.exit_status = "Compilation Error"
        self.save()
        return
    action = ActionDescription(
        commands=[validation_command],
        files=[("input.txt", self.testcase.input_file)],
        # Use the local checked above; previously this re-read
        # self.validator.compiled_file, repeating whatever work that
        # property does.
        executables=[("validator", validator_compiled_file)],
        time_limit=settings.FAILSAFE_TIME_LIMIT,
        memory_limit=settings.FAILSAFE_MEMORY_LIMIT,
        stdin_redirect="input.txt",
        stderr_redirect="stderr.txt",
        output_files=["stderr.txt"])
    success, execution_success, outputs, data = execute_with_input(action)
    if success:
        self.exit_status = get_exit_status_human_translation(
            data[0]["exit_status"])
        # A validator signals invalid input by exiting unsuccessfully.
        self.valid = execution_success
        # FIXME: This probably should be done more properly
        # First stderr line is taken as the human-readable message.
        stderr_file = outputs["stderr.txt"]
        self.validation_message = stderr_file.file.readline()
        stderr_file.delete()
    else:
        self.valid = False
        self.validation_message = "Validation failed due to system error. " \
                                  "Please inform the system administrator"
        self.exit_status = "System Error"
    self.executed = True
    self.save()
def generate_output(self, problem_code, testcase_code, language, solution_file):
    """
    Compiles a solution (together with any matching graders) and runs it on
    one testcase, returning an EvaluationResult.

    :param problem_code: code used to resolve the problem revision
    :param testcase_code: name of the testcase within the revision
    :param language: source language, or None to auto-detect from filename
    :param solution_file: (name, file) pair of the contestant's source
    """
    if language is None:
        language = self.judge.detect_language(solution_file[0])
    if language not in self.judge.get_supported_languages():
        return EvaluationResult(
            success=False,
            verdict=JudgeVerdict.invalid_submission,
            message="Language not supported")
    revision = self.parse_code(problem_code)
    # Keep only graders whose extension matches the chosen language.
    graders = [(grader.name, grader.code)
               for grader in revision.grader_set.all()
               if any([grader.name.endswith(x)
                       for x in get_valid_extensions(language)])]
    name, file = solution_file
    code_name = revision.problem_data.code_name
    compiled_file_name = "code.out"
    # A grader named exactly "grader.<primary ext>" is compiled first.
    normal_names = [name]
    prioritized_names = []
    for grader_name, _ in graders:
        if len(get_valid_extensions(language)) > 0 and \
                grader_name == "grader{ext}".format(
                    ext=get_valid_extensions(language)[0]):
            prioritized_names.append(grader_name)
        else:
            normal_names.append(grader_name)
    compile_commands = get_compilation_commands(
        language,
        prioritized_names + normal_names,
        compiled_file_name)
    action = ActionDescription(
        commands=compile_commands,
        files=[(name, file)] + graders,
        output_files=[compiled_file_name],
        time_limit=self.judge.compile_time_limit,
        memory_limit=self.judge.compile_memory_limit,
    )
    time_limit = revision.problem_data.time_limit
    memory_limit = revision.problem_data.memory_limit
    testcase = revision.testcase_set.get(name=testcase_code)
    success, compilation_success, outputs, stdout, stderr, \
        compilation_sandbox_data = compile_source(action)
    if not success or not compilation_success or \
            outputs[compiled_file_name] is None:
        # Bug fix: the three parts used to be concatenated without
        # separators, producing one run-together line.
        compilation_message = "Compilation not successful\n"
        compilation_message += "Standard output:\n" + stdout + "\n"
        compilation_message += "Standard error:\n" + stderr
        return EvaluationResult(
            success=False,
            message=compilation_message,
            verdict=JudgeVerdict.compilation_failed)
    compiled = outputs[compiled_file_name]
    # Java needs the name of the main class; grader takes precedence.
    if language == "java":
        if "grader.java" in [name for name, _ in graders]:
            main = "grader"
        else:
            main = code_name
    else:
        main = None
    execution_command = get_execution_command(
        language, compiled_file_name, main=main)
    stdout_redirect = "output.txt"
    action = ActionDescription(
        commands=[execution_command],
        executables=[(compiled_file_name, compiled)],
        files=[("input.txt", testcase.input_file)],
        stdin_redirect="input.txt",
        stdout_redirect=stdout_redirect,
        output_files=[stdout_redirect],
        time_limit=time_limit,
        memory_limit=memory_limit)
    success, execution_success, outputs, execution_sandbox_datas = \
        execute_with_input(action)
    if not success:
        return EvaluationResult(
            success=False,
            verdict=JudgeVerdict.invalid_submission,
            message="Sandbox error")
    evaluation_success = True
    if not execution_success:
        output_file = None
    else:
        output_file = outputs[stdout_redirect]
    return EvaluationResult(
        success=evaluation_success,
        output_file=output_file,
        execution_time=execution_sandbox_datas[0]["execution_time"],
        # Sandbox reports KiB; convert summed memory to MiB.
        # NOTE(review): unit assumption — confirm against sandbox docs.
        execution_memory=sum(
            int(sandbox["execution_memory"])
            for sandbox in execution_sandbox_datas) / 1024,
        verdict=self.judge.get_verdict_from_exit_status(
            execution_sandbox_datas[0]["exit_status"]),
    )