def evaluate(self, testcase: int, subtask: int, input: File,
             validation: Optional[File], correct_output: Optional[File]
             ) -> "Tuple[List[Execution], Execution]":
    """
    Build the evaluation DAG nodes for one solution on one testcase.

    :param testcase: number of the testcase to evaluate
    :param subtask: number of the subtask the testcase belongs to
    :param input: input file of the testcase
    :param validation: stdout of the validation, if any; used only as a
        fake dependency so the evaluation waits for the validation
    :param correct_output: expected output file, if any, forwarded to the
        checker
    :return: the list of evaluation executions and the checker execution
    """
    # resource limits for the solution process; the wall clock gets 50%
    # slack over the CPU limit to absorb scheduling noise
    limits = Resources()
    limits.cpu_time = self.task.time_limit
    limits.wall_time = self.task.time_limit * 1.5
    limits.memory = self.task.memory_limit_kb

    inputs = dict()
    if validation:
        # dummy input: forces the evaluation to run after the validation
        inputs["tm_wait_validation"] = validation
    if self.task.input_file:
        # the task reads from a named file inside the sandbox
        inputs[self.task.input_file] = input
        stdin = None
    else:
        # the task reads from stdin
        stdin = input
    outputs = []
    if self.task.output_file:
        outputs.append(self.task.output_file)

    # NOTE: renamed from `eval` to avoid shadowing the builtin
    evaluation = Execution(
        "Evaluation of %s on testcase %d" % (self.solution.name, testcase),
        self.solution.pool, self.solution, [], "evaluation", {
            "name": self.solution.name,
            "subtask": subtask,
            "testcase": testcase
        },
        cache_on=[CacheMode.ALL],
        extra_time=self.config.extra_time,
        limits=limits,
        can_exclusive=True,
        stdin=stdin,
        inputs=inputs,
        outputs=outputs)
    # the produced output is either a named file or the process stdout
    if self.task.output_file:
        output = evaluation.output(self.task.output_file)
    else:
        output = evaluation.stdout
    check = get_checker_execution(
        self.solution.pool, self.task, self.solution.name, subtask, testcase,
        self.checker, input, output, correct_output,
        "Checking solution %s for testcase %d" % (self.solution.name,
                                                  testcase))
    return [evaluation], check
def check_sample_cases(task: IOITask, pool: ExecutionPool,
                       interface: IOIUIInterface):
    """
    Check that the sample cases in the statement are valid and that the
    official solution produces the expected sample outputs.

    :param task: the task whose samples are checked
    :param pool: execution pool where the check executions are scheduled
    :param interface: UI interface that receives errors/warnings
    """
    # only Batch tasks have sample output files to verify
    if task.task_type != TaskType.Batch:
        return
    # without the official solution we cannot solve the input files
    if not task.official_solution:
        return
    inputs = list_files([
        "statement/input*.txt",
        "statement/{}.input*.txt".format(task.name),
        "testo/input*.txt",
        "testo/{}.input*.txt".format(task.name),
    ], valid_extensions=[".txt"])
    outputs = list_files([
        "statement/output*.txt",
        "statement/{}.output*.txt".format(task.name),
        "testo/output*.txt",
        "testo/{}.output*.txt".format(task.name),
    ], valid_extensions=[".txt"])

    num_to_input = dict()  # type: Dict[int, str]
    num_to_output = dict()  # type: Dict[int, str]
    num_to_input_file = dict()  # type: Dict[int, File]
    num_to_output_file = dict()  # type: Dict[int, File]
    num_to_sol_output_file = dict()  # type: Dict[int, File]
    num_to_validation = dict()  # type: Dict[int, File]

    for infile in inputs:
        # the dot before "txt" is escaped so only real ".txt" names match
        match = re.match(r".*input(\d+)\.txt", infile)
        # invalid sample file format, skip it
        if not match:
            continue
        sample_num = int(match.group(1))
        num_to_input[sample_num] = infile
        num_to_input_file[sample_num] = pool.frontend.provideFile(
            infile, "Sample input {}".format(infile), False)
        # skip the validation if there is no default validator
        if not task.default_val:
            continue
        in_files = {VALIDATION_INPUT_NAME: num_to_input_file[sample_num]}
        validation = Execution(
            "Validation of sample input {}".format(infile), pool,
            task.default_val.source_file, [VALIDATION_INPUT_NAME, "0"],
            "sanity-check-validation", {"sample_testcase": sample_num},
            inputs=in_files)
        num_to_validation[sample_num] = validation.stdout
        _setup_execution_callback(
            interface, validation,
            "Validation of sample input {} failed".format(infile))

    # if the output files were not yet generated (e.g. when they are just
    # copied), the solution is not prepared
    if not task.official_solution.prepared:
        task.official_solution.prepare(pool)

    for outfile in outputs:
        match = re.match(r".*output(\d+)\.txt", outfile)
        if not match:
            continue
        sample_num = int(match.group(1))
        # skip the output if there is no corresponding input
        if sample_num not in num_to_input:
            continue
        num_to_output[sample_num] = outfile
        num_to_output_file[sample_num] = pool.frontend.provideFile(
            outfile, "Sample output {}".format(outfile), False)

        in_files = dict()
        # if the validator is not present we don't wait for it
        if sample_num in num_to_validation:
            in_files["wait_for_validation"] = num_to_validation[sample_num]
        if task.input_file:
            in_files[task.input_file] = num_to_input_file[sample_num]
            stdin = None
        else:
            stdin = num_to_input_file[sample_num]
        out_files = []
        if task.output_file:
            out_files.append(task.output_file)

        solving = Execution(
            "Solving sample output {}".format(outfile), pool,
            task.official_solution, [], "sanity-check-solution",
            {"sample_testcase": sample_num},
            inputs=in_files,
            stdin=stdin,
            outputs=out_files)
        if task.output_file:
            num_to_sol_output_file[sample_num] = solving.output(
                task.output_file)
        else:
            num_to_sol_output_file[sample_num] = solving.stdout
        _setup_execution_callback(
            interface, solving,
            "Solution of sample input {} failed".format(
                num_to_input[sample_num]))

        check = get_checker_execution(
            pool, task, "", -1, sample_num, task.checker,
            num_to_input_file[sample_num], num_to_output_file[sample_num],
            num_to_sol_output_file[sample_num],
            "Checking sample output {}".format(outfile),
            {"sanity_check": True})
        _setup_checker_callback(
            interface, check,
            "Checking sample output {} failed".format(outfile),
            task.checker is not None)
class SourceFile:
    """
    A SourceFile contains a ref to a source file, this class will manage its
    compilation and execution.
    """

    @staticmethod
    def from_file(path: str, unit_name: str, copy_executable: bool,
                  write_to: Optional[str], target_arch: Arch,
                  grader_map: Dict[Language, GraderInfo]) -> "SourceFile":
        """
        Handy constructor to build a SourceFile
        :param path: path to the source file
        :param unit_name: name of the unit of this source file. Usually the
            name of the task
        :param copy_executable: Whether to copy the executable into write_to
        :param write_to: Where to copy the executable, if copy_executable
        :param target_arch: Architecture to target the build
        :param grader_map: Map with the graders for all the languages
        :raises ValueError: if copy_executable is set without write_to, if
            the path cannot be resolved, or if a non-compiled file is not
            executable
        """
        if copy_executable and not write_to:
            raise ValueError(
                "Asked to copy the executable but not specified where")
        old_path = path
        # fall back to searching the PATH when the file is not found as-is
        if not os.path.exists(path):
            path = find_executable(path)
        if not path:
            raise ValueError("Cannot find %s" % old_path)

        language = LanguageManager.from_file(path)
        dependencies = language.get_dependencies(path)
        grader = grader_map.get(language)
        exe_name = language.exe_name(path, write_to)
        # `copy_executable and write_to` is either the destination path or a
        # falsy value, matching the Optional[str] write_bin_to parameter
        source_file = SourceFile(path, unit_name, exe_name, dependencies,
                                 language, copy_executable and write_to,
                                 target_arch, grader)
        # scripts that are run directly must carry the executable bit
        if not language.need_compilation:
            if not is_executable(source_file.path):
                raise ValueError("The file %s is not an executable. "
                                 "Please check the shebang (#!)" % path)
        return source_file

    def __init__(self, path: str, unit_name: str, exe_name: str,
                 dependencies: List[Dependency], language: Language,
                 write_bin_to: Optional[str], target_arch: Arch,
                 grader: Optional["GraderInfo"]):
        self.path = path
        self.unit_name = unit_name
        self.dependencies = dependencies
        self.language = language
        self.write_bin_to = write_bin_to
        self.target_arch = target_arch
        self.grader = grader
        self.name = os.path.basename(path)
        self.exe_name = exe_name
        self.pool = None  # type: ExecutionPool
        # the following are set only after `prepare` has been called
        self.executable = None  # type: Optional[File]
        self.compilation = None  # type: Optional[Execution]
        self.compilation_stderr = None  # type: Optional[File]
        self.compilation_stdout = None  # type: Optional[File]

    @property
    def prepared(self) -> bool:
        # a non-None pool means `prepare` has already run
        return self.pool is not None

    def unprepare(self):
        """
        Unprepare the source file. Useful for recompiling it in a different
        execution pool.
        """
        self.pool = None
        self.executable = None
        self.compilation = None
        self.compilation_stderr = None
        self.compilation_stdout = None

    def prepare(self, pool: ExecutionPool):
        """
        Prepare the source file for execution, compile the source if needed.
        After this call self.executable will be available. If the source file
        is to compile then compilation_stderr and compilation_stdout will be
        available too
        """
        # idempotent: a second call with a different pool is a no-op
        # (use `unprepare` first to recompile elsewhere)
        if self.prepared:
            return
        self.pool = pool
        if self.language.need_compilation:
            self._compile()
        else:
            self._not_compile()
        # optionally export the binary to the requested location
        if self.write_bin_to and not self.pool.config.dry_run:
            self.executable.getContentsToFile(self.write_bin_to, True, True)

    def _compile(self):
        # Build the compilation Execution and expose its output binary.
        compilation_files = [self.name]
        if self.grader:
            compilation_files += [d.name for d in self.grader.files]
        cmd_type, cmd = self.language.get_compilation_command(
            compilation_files, self.exe_name, self.unit_name, True,
            self.target_arch)
        if cmd_type != CommandType.SYSTEM:
            raise ValueError("Local file compilers are not supported yet")
        if not cmd:
            raise ValueError("Unexpected empty compiler command")
        # some languages (presumably e.g. Pascal units) require the source
        # to carry the unit name rather than the original file name
        if self.language.need_unit_name:
            source_name = self.unit_name + self.language.source_extensions[0]
        else:
            source_name = self.name
        inputs = {
            source_name: self.pool.frontend.provideFile(
                self.path, "Source file for " + self.name, False)
        }
        # copy the declared dependencies and the grader files into the sandbox
        for dep in self.dependencies:
            inputs[dep.name] = self.pool.frontend.provideFile(
                dep.path, dep.path, False)
        if self.grader:
            for dep in self.grader.files:
                inputs[dep.name] = self.pool.frontend.provideFile(
                    dep.path, dep.path, False)
        # NOTE(review): stdout_limit reuses COMPILATION_STDERR_LIMIT — looks
        # intentional (same cap for both streams), confirm if a separate
        # stdout constant exists
        self.compilation = Execution(
            "Compilation of %s" % self.name,
            self.pool,
            cmd[0],
            cmd[1:],
            "compilation", {
                "file": self.name,
                "path": self.path
            },
            inputs=inputs,
            outputs=[(self.exe_name, True)],
            stdout_limit=COMPILATION_STDERR_LIMIT,
            stderr_limit=COMPILATION_STDERR_LIMIT,
            store_stderr=True,
            store_stdout=True)
        self.executable = self.compilation.output(self.exe_name)

    def _not_compile(self):
        # No compilation needed: provide the source itself as the
        # (executable) file to run.
        self.executable = self.pool.frontend.provideFile(
            self.path, "Source file for " + self.name, True)

    def __repr__(self):
        return "<SourceFile path=%s language=%s>" % (self.path, self.language)
def generate_inputs(
        pool: ExecutionPool, task: IOITask, interface: IOIUIInterface
) -> Tuple[Dict[Tuple[int, int], File], Dict[Tuple[int, int], File],
           Dict[Tuple[int, int], File]]:
    """
    Create the part of the DAG responsible for the input and output files.
    Will return 3 dicts: one for input, one for output and one for
    validations. Each dict has (subtask number, test case number) -> File
    """

    # prepare a non-solution source file once and register it in the UI
    def add_non_solution(source: SourceFile):
        if not source.prepared:
            source.prepare(pool)
            interface.add_non_solution(source)

    inputs = dict()  # type: Dict[Tuple[int, int], File]
    outputs = dict()  # type: Dict[Tuple[int, int], File]
    validations = dict()  # type: Dict[Tuple[int, int], File]
    for st_num, subtask in task.subtasks.items():
        for tc_num, testcase in subtask.testcases.items():
            testcase_id = (st_num, tc_num)
            if testcase.validator:
                add_non_solution(testcase.validator.source_file)
            # static input file
            if testcase.input_file:
                try:
                    inputs[testcase_id] = pool.frontend.provideFile(
                        testcase.input_file, "Static input %d" % tc_num,
                        False)
                    if testcase.validator:
                        val = Execution(
                            "Validation of input %d" % tc_num,
                            pool,
                            testcase.validator.source_file,
                            testcase.validator.get_args(
                                testcase, subtask, tc_num, st_num + 1),
                            "validation", {
                                "subtask": st_num,
                                "testcase": tc_num
                            },
                            inputs={
                                VALIDATION_INPUT_NAME: inputs[testcase_id]
                            },
                            store_stderr=True)
                        validations[testcase_id] = val.stdout
                        interface.add_validation(st_num, tc_num, val)
                except RuntimeError as ex:
                    # the static file may be missing: report and skip
                    interface.add_error(str(ex))
                    interface.subtasks[st_num][tc_num].status = \
                        TestcaseGenerationStatus.FAILURE
                    continue
            # generate input file
            else:
                add_non_solution(testcase.generator.source_file)
                deps = dict()
                for dep in testcase.extra_deps:
                    deps[dep.name] = pool.frontend.provideFile(
                        dep.path, dep.path, False)
                gen = Execution(
                    "Generation of input %d" % tc_num,
                    pool,
                    testcase.generator.source_file,
                    testcase.generator_args,
                    "generation", {
                        "subtask": st_num,
                        "testcase": tc_num
                    },
                    inputs=deps,
                    store_stderr=True)
                inputs[testcase_id] = gen.stdout
                interface.add_generation(st_num, tc_num, gen)
                # NOTE(review): unlike the static branch, the validator is
                # used unguarded here — presumably generated testcases always
                # have one; verify against the task parser
                val = Execution(
                    "Validation of input %d" % tc_num,
                    pool,
                    testcase.validator.source_file,
                    testcase.validator.get_args(testcase, subtask, tc_num,
                                                st_num + 1),
                    "validation", {
                        "subtask": st_num,
                        "testcase": tc_num
                    },
                    inputs={VALIDATION_INPUT_NAME: inputs[testcase_id]},
                    store_stderr=True)
                validations[testcase_id] = val.stdout
                interface.add_validation(st_num, tc_num, val)
            if testcase.write_input_to and not pool.config.dry_run:
                inputs[testcase_id].getContentsToFile(testcase.write_input_to)

            if task.task_type == TaskType.Batch:
                # static output file
                if testcase.output_file:
                    outputs[testcase_id] = pool.frontend.provideFile(
                        testcase.output_file, "Static output %d" % tc_num,
                        False)
                else:
                    add_non_solution(task.official_solution)
                    # fake dependency: solve only after the validation passed
                    deps = {"wait_for_validation": validations[testcase_id]}
                    if task.input_file:
                        deps[task.input_file] = inputs[testcase_id]
                        stdin = None
                    else:
                        stdin = inputs[testcase_id]
                    outs = []
                    if task.output_file:
                        outs.append(task.output_file)
                    sol = Execution(
                        "Generation of output %d" % tc_num,
                        pool,
                        task.official_solution, [],
                        "solution", {
                            "subtask": st_num,
                            "testcase": tc_num
                        },
                        inputs=deps,
                        outputs=outs,
                        stdin=stdin,
                        store_stderr=True)
                    if task.output_file:
                        outputs[testcase_id] = sol.output(task.output_file)
                    else:
                        outputs[testcase_id] = sol.stdout
                    interface.add_solving(st_num, tc_num, sol)
                if testcase.write_output_to and not pool.config.dry_run:
                    outputs[testcase_id].getContentsToFile(
                        testcase.write_output_to, True, True)
    if task.checker:
        add_non_solution(task.checker)
    return inputs, outputs, validations
def compile_booklet(
        pool: ExecutionPool,
        statements: List["OIITexStatement"],
        language: Optional[str] = None
) -> Tuple[Execution, File, List[StatementDepInfo]]:
    """
    Build the DAG that compiles the booklet containing all the given
    statements.

    :param pool: execution pool where the executions are scheduled
    :param statements: statements to merge into the booklet
    :param language: optional language code of the statements
    :return: the booklet compilation execution, the resulting pdf File and
        the auxiliary executions (asy compilation/cropping)
    """
    inputs = dict()
    other_executions = []  # type: List[StatementDepInfo]
    data_dir = os.path.join(get_template_dir(), "data")
    template_files = set(get_files(data_dir))
    packages = set()  # type: Set[str]
    statement_files = []  # type: List[str]
    task_names = list()  # type: List[str]
    for statement in statements:
        task_name = statement.task.name
        task_names.append(task_name)
        with open(statement.path, "r") as f:
            content = f.read()
        # collect the \usepackage-s so the booklet preamble can include them
        tex = extract_packages(content)
        packages |= set(tex.packages)
        task_tex_file = build_task_tex_file(pool.config, statement.task,
                                            tex.content, language)
        params = get_template_parameters(pool.config, statement.task,
                                         language)
        deps = get_dependencies(os.path.dirname(statement.path),
                                task_tex_file, params.get("logo"))
        file = pool.frontend.provideFileContent(
            task_tex_file, "Statement file %s" % statement.name)
        statement_path = os.path.join(task_name, statement.name)
        inputs[statement_path] = file
        statement_files.append(statement_path)
        for dep in deps:
            if os.path.join(data_dir, dep.name) in template_files:
                # skip the template files
                continue
            # non asy files, like images or other tex files, they are just
            # copied inside the sandbox
            if os.path.splitext(dep.path)[1] != ".asy":
                file = pool.frontend.provideFile(
                    dep.path, "Statement dependency %s" % dep.name)
                if os.path.join(task_name, dep.name) not in inputs:
                    inputs[os.path.join(task_name, dep.name)] = file
                continue
            # the asymptote files needs to be compiled into pdf and then
            # cropped; replace only the extension, not any ".asy" substring
            # that may appear elsewhere in the name
            name = os.path.splitext(dep.name)[0] + ".pdf"
            # compile the asy file like a normal source file
            source_file = SourceFile.from_file(dep.path, dep.name, False,
                                               None, Arch.DEFAULT, dict())
            source_file.prepare(pool)
            other_executions.append(
                StatementDepInfo(dep.name, source_file.compilation))
            # crop the pdf using pdfcrop
            crop = Execution(
                "Crop compiled asy file %s" % dep.name,
                pool,
                "pdfcrop", ["file.pdf"],
                "asy-cropping", {"file": dep.name},
                inputs={"file.pdf": source_file.executable},
                outputs=["file-crop.pdf"])
            other_executions.append(
                StatementDepInfo("Crop %s" % source_file.exe_name, crop))
            inputs[os.path.join(task_name, name)] = crop.output(
                "file-crop.pdf")

    booklet_tex = build_contest_tex_file(pool.config, packages,
                                         statement_files, language)
    booklet = pool.frontend.provideFileContent(booklet_tex,
                                               "Booklet source file")
    inputs["booklet.tex"] = booklet
    # add the template files to the sandbox
    for path in template_files:
        name = path[len(data_dir) + 1:]
        file = pool.frontend.provideFile(path, "Template file %s" % name)
        inputs[name] = file
    # TODO eventually use directly latexmk when setting env vars will be
    # supported
    compilation = Execution(
        "Compilation of booklet",
        pool,
        "env", [
            "TEXINPUTS=.:%s:" % ":".join(task_names), "latexmk", "-f",
            "-interaction=nonstopmode", "-pdf", "booklet.tex"
        ],
        "statement-compilation", {"language": language},
        inputs=inputs,
        outputs=["booklet.pdf"])
    pdf_file = compilation.output("booklet.pdf")
    return compilation, pdf_file, other_executions