Example #1
def _setup_execution_callback(interface: IOIUIInterface, execution: Execution,
                              error_message: str):
    def on_done(result: Result):
        if result.status != ResultStatus.SUCCESS:
            interface.add_error(error_message)

    execution.bind(on_done)
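
A minimal sketch of the bind() contract these callbacks rely on: bind(on_done, on_start=None) registers hooks that the pool fires around the run, and on_done receives the Result. The stub classes below are hypothetical stand-ins for the real task_maker types, only to make the flow runnable.

# Hypothetical stand-ins for task_maker's Execution/Result, only to
# illustrate the callback flow used throughout these examples.
class ResultStatus:
    SUCCESS = "SUCCESS"
    FAILED = "FAILED"

class Result:
    def __init__(self, status):
        self.status = status

class StubExecution:
    _on_done = _on_start = None

    def bind(self, on_done, on_start=None):
        self._on_done, self._on_start = on_done, on_start

    def run(self, status):
        if self._on_start:
            self._on_start()
        if self._on_done:
            self._on_done(Result(status))

errors = []

def on_done(result):
    if result.status != ResultStatus.SUCCESS:
        errors.append("error message")

execution = StubExecution()
execution.bind(on_done)
execution.run(ResultStatus.FAILED)  # afterwards errors == ["error message"]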
Example #2
    def evaluate(
            self, testcase: int, subtask: int, input: File,
            validation: Optional[File],
            correct_output: Optional[File]
    ) -> Tuple[List[Execution], Execution]:
        limits = Resources()
        limits.cpu_time = self.task.time_limit
        limits.wall_time = self.task.time_limit * 1.5
        limits.memory = self.task.memory_limit_kb
        inputs = dict()
        if validation:
            inputs["tm_wait_validation"] = validation
        if self.task.input_file:
            inputs[self.task.input_file] = input
            stdin = None
        else:
            stdin = input
        outputs = []
        if self.task.output_file:
            outputs.append(self.task.output_file)
        eval = Execution("Evaluation of %s on testcase %d" %
                         (self.solution.name, testcase),
                         self.solution.pool,
                         self.solution, [],
                         "evaluation", {
                             "name": self.solution.name,
                             "subtask": subtask,
                             "testcase": testcase
                         },
                         cache_on=[CacheMode.ALL],
                         extra_time=self.config.extra_time,
                         limits=limits,
                         can_exclusive=True,
                         stdin=stdin,
                         inputs=inputs,
                         outputs=outputs)
        if self.task.output_file:
            output = eval.output(self.task.output_file)
        else:
            output = eval.stdout

        check = get_checker_execution(
            self.solution.pool, self.task, self.solution.name, subtask,
            testcase, self.checker, input, output, correct_output,
            "Checking solution %s for testcase %d" %
            (self.solution.name, testcase))

        return [eval], check
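
A hedged sketch of how a caller might consume the returned pair; evaluator, the file handles and the interface instance are assumptions, while add_evaluate_checking mirrors the signature shown in Example #12.

# Hypothetical wiring: track each evaluation and hand the checker
# execution to the UI interface (signature as in Example #12).
evals, check = evaluator.evaluate(tc_num, st_num, input_file,
                                  validation_file, correct_output)
for ev in evals:
    ev.bind(lambda result, ev=ev: print(ev.name, result.status))
interface.add_evaluate_checking(st_num, tc_num, solution_name, check)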
Example #3
    def _compile(self):
        compilation_files = [self.name]
        if self.grader:
            compilation_files += [d.name for d in self.grader.files]

        cmd_type, cmd = self.language.get_compilation_command(
            compilation_files, self.exe_name, self.unit_name, True,
            self.target_arch)

        if cmd_type != CommandType.SYSTEM:
            raise ValueError("Local file compilers are not supported yet")
        if not cmd:
            raise ValueError("Unexpected empty compiler command")

        if self.language.need_unit_name:
            source_name = self.unit_name + self.language.source_extensions[0]
        else:
            source_name = self.name
        inputs = {
            source_name:
            self.pool.frontend.provideFile(self.path,
                                           "Source file for " + self.name,
                                           False)
        }
        for dep in self.dependencies:
            inputs[dep.name] = self.pool.frontend.provideFile(
                dep.path, dep.path, False)
        if self.grader:
            for dep in self.grader.files:
                inputs[dep.name] = self.pool.frontend.provideFile(
                    dep.path, dep.path, False)
        self.compilation = Execution("Compilation of %s" % self.name,
                                     self.pool,
                                     cmd[0],
                                     cmd[1:],
                                     "compilation", {
                                         "file": self.name,
                                         "path": self.path
                                     },
                                     inputs=inputs,
                                     outputs=[(self.exe_name, True)],
                                     stdout_limit=COMPILATION_STDERR_LIMIT,
                                     stderr_limit=COMPILATION_STDERR_LIMIT,
                                     store_stderr=True,
                                     store_stdout=True)
        self.executable = self.compilation.output(self.exe_name)
Example #4
    def add_solving(self, subtask: int, testcase: int, solving: Execution):
        """
        Start tracking the execution of the official solution on a testcase
        """
        testcase_status = self.subtasks[subtask][testcase]
        testcase_status.solution = solving

        def on_start():
            testcase_status.status = TestcaseGenerationStatus.SOLVING

        def on_done(result: Result):
            if result.status == ResultStatus.SUCCESS:
                testcase_status.status = TestcaseGenerationStatus.DONE
            else:
                self.add_error("Failed to generate output of testcase #%d" %
                               testcase)
                testcase_status.status = TestcaseGenerationStatus.FAILURE

        solving.bind(on_done, on_start)
Example #5
    def add_solving(self, solution: str, solving: Execution):
        """
        Start tracking the evaluation of a solution
        """
        info = self.solutions_info[solution]
        info.solution = solving

        def on_start():
            info.status = SolutionStatus.SOLVING

        def on_done(result: Result):
            if result.status == ResultStatus.SUCCESS:
                info.status = SolutionStatus.SOLVED
            else:
                self.add_error("Solution {} failed".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Solution failed: " + result_to_str(result)

        solving.bind(on_done, on_start)
Example #6
    def add_validation(self, subtask: int, testcase: int,
                       validation: Execution):
        """
        Start tracking the validation of a testcase
        """
        testcase_status = self.subtasks[subtask][testcase]
        testcase_status.validation = validation

        def on_start():
            testcase_status.status = TestcaseGenerationStatus.VALIDATING

        def on_done(result: Result):
            if result.status == ResultStatus.SUCCESS:
                testcase_status.status = TestcaseGenerationStatus.VALIDATED
            else:
                self.add_error("Failed to validate testcase #%d" % testcase)
                testcase_status.status = TestcaseGenerationStatus.FAILURE

        validation.bind(on_done, on_start)
Example #7
    def bind(self, results_dir: str, generator: Execution, checker: Execution):
        def gen_on_done(res: Result):
            if res.status != ResultStatus.SUCCESS:
                raise RuntimeError("radamsa failed! %s %s" %
                                   (generator.name, result_to_dict(res)))
            self.num_tests += 1

        def check_on_done(res: Result):
            failed = False
            out = checker.stdout_content_bytes
            score = -1
            try:
                score = float(out.decode())
            except (ValueError, UnicodeDecodeError):
                failed = True
            if not 0.0 <= score <= 1.0:
                failed = True
            if res.status != ResultStatus.SUCCESS:
                failed = True

            if not failed:
                self.num_successes += 1
                return

            self.num_fails += 1
            dest_dir = os.path.join(results_dir, "fail%d" % self.num_fails)
            os.makedirs(dest_dir, exist_ok=True)
            with open(os.path.join(dest_dir, "fuzz_output"), "wb") as f:
                f.write(generator.stdout_content_bytes)
            with open(os.path.join(dest_dir, "checker_stdout"), "wb") as f:
                f.write(checker.stdout_content_bytes)
            with open(os.path.join(dest_dir, "checker_stderr"), "wb") as f:
                f.write(checker.stderr_content_bytes)
            with open(os.path.join(dest_dir, "data.json"), "w") as f:
                data = {
                    "data": checker.ui_print_data,
                    "result": result_to_dict(checker.result)
                }
                f.write(json.dumps(data, indent=4))

        generator.bind(gen_on_done)
        checker.bind(check_on_done)
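
check_on_done counts a run as failed unless the checker exits successfully and prints a score in [0, 1] on stdout. A minimal sketch of a checker that satisfies that contract; the token-wise comparison is an assumption, not this project's checker.

#!/usr/bin/env python3
# Minimal sketch of a conforming checker: compare the expected and the
# contestant output token-wise and print a score in [0, 1] on stdout.
import sys

def main():
    _input_path, output_path, contestant_path = sys.argv[1:4]
    with open(output_path) as expected, open(contestant_path) as got:
        ok = expected.read().split() == got.read().split()
    print(1.0 if ok else 0.0)

if __name__ == "__main__":
    main()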
Example #8
    def add_validation(self, solution: str, validation: Execution):
        """
        Start tracking the validation of the input of a solution
        """
        info = self.solutions_info[solution]
        info.validation = validation

        def on_start():
            info.status = SolutionStatus.VALIDATING

        def on_done(result: Result):
            if result.status == ResultStatus.SUCCESS:
                info.status = SolutionStatus.VALIDATED
            else:
                self.add_error(
                    "Failed to validate input for {}".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Validator failed: " + result_to_str(result)

        validation.bind(on_done, on_start)
Example #9
def _setup_checker_callback(interface: IOIUIInterface, checking: Execution,
                            error_message: str, custom_checker: bool):
    def on_done(result: Result):
        if result.status != ResultStatus.SUCCESS:
            interface.add_error(error_message)
        if not custom_checker:
            return
        stdout = checking.stdout_content
        try:
            score = float(stdout)
        except ValueError:
            interface.add_error(error_message +
                                ": invalid score: {}".format(stdout))
            return
        if not 0.0 <= score <= 1.0:
            interface.add_error(error_message +
                                ": invalid score: {}".format(stdout))
            return

    checking.bind(on_done)
Example #10
    def add_checking(self, solution: str, checking: Execution):
        """
        Start tracking the checking of a solution's output
        """
        info = self.solutions_info[solution]
        info.checking = checking

        def on_start():
            info.status = SolutionStatus.CHECKING

        def on_done(result: Result):
            self._compute_score(solution, checking.stdout_content)
            self.ui_printer.terry_solution_outcome(solution, info)
            if result.status == ResultStatus.SUCCESS:
                info.status = SolutionStatus.DONE
            else:
                self.add_error(
                    "Checker failed on output of solution {}".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Checker failed: " + result_to_str(result)

        checking.bind(on_done, on_start)
Example #11
    def add_generation(self, solution: str, seed: int, generation: Execution):
        """
        Start tracking the generation of the input for a solution
        """
        info = self.solutions_info[solution]
        info.seed = seed
        info.generation = generation

        def on_start():
            info.status = SolutionStatus.GENERATING

        def on_done(result: Result):
            if result.status == ResultStatus.SUCCESS:
                info.status = SolutionStatus.GENERATED
            else:
                self.add_error(
                    "Failed to generate input for {} with seed {}".format(
                        solution, seed))
                info.status = SolutionStatus.FAILED
                info.message = "Generator failed: " + result_to_str(result)

        generation.bind(on_done, on_start)
Example #12
    def add_evaluate_checking(self, subtask: int, testcase: int, solution: str,
                              checking: Execution):
        """
        Start tracking the checking of a solution on a testcase
        """
        has_custom_checker = self.task.checker
        custom_checker_state = CustomCheckerState(solution)

        def on_start():
            self.testing[solution].testcase_results[subtask][
                testcase].status = TestcaseSolutionStatus.CHECKING

        def on_done(result: Result):
            if has_custom_checker:
                custom_checker_state.set_result(result)
                if result.status != ResultStatus.SUCCESS:
                    self.add_error(
                        "Checker failed for testcase #%d for solution %s" %
                        (testcase, solution))
                custom_checker_state.set_stdout(checking.stdout_content)
                custom_checker_state.set_stderr(checking.stderr_content)
            else:
                self.testing[solution].update_default_check_result(
                    subtask, testcase, result)
                self.ui_printer.testcase_outcome(
                    solution, testcase, subtask,
                    self.testing[solution].testcase_results[subtask][testcase])

        def on_checked():
            self.testing[solution].update_custom_check_result(
                subtask, testcase, custom_checker_state)
            self.ui_printer.testcase_outcome(
                solution, testcase, subtask,
                self.testing[solution].testcase_results[subtask][testcase])

        if has_custom_checker:
            custom_checker_state.set_callback(on_checked)
        checking.bind(on_done, on_start)
Example #13
def get_checker_execution(pool: ExecutionPool,
                          task: IOITask,
                          name: str,
                          subtask: int,
                          testcase: int,
                          checker: Optional[SourceFile],
                          input: Optional[File],
                          output: File,
                          correct_output: File,
                          message: str,
                          extra_data: Dict = None) -> Execution:
    """
    Build the execution of the checker: it can be either a custom checker or
    the default diff-based one.
    """
    if not extra_data:
        extra_data = dict()
    inputs = dict()
    if checker:
        checker.prepare(pool)
        cmd = checker
        args = ["input", "output", "contestant_output"]
        inputs["input"] = input
    else:
        cmd = "diff"
        args = ["-w", "output", "contestant_output"]
    inputs["output"] = correct_output
    inputs["contestant_output"] = output
    limits = Resources()
    limits.cpu_time = task.time_limit * 2
    limits.wall_time = task.time_limit * 1.5 * 2
    limits.memory = task.memory_limit_kb * 2
    return Execution(message,
                     pool,
                     cmd,
                     args,
                     "checking", {
                         "name": name,
                         "subtask": subtask,
                         "testcase": testcase,
                         **extra_data
                     },
                     cache_on=[CacheMode.ALL],
                     limits=limits,
                     inputs=inputs,
                     store_stdout=True,
                     store_stderr=True)
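
A hedged call sketch mirroring the invocation in Example #2; the file handles are assumptions, and passing checker=None falls back to the "diff -w" comparison above.

# Hypothetical wiring; input_file, contestant_output and correct_output
# are File handles already provided to the pool.
check = get_checker_execution(
    pool, task, solution_name, st_num, tc_num,
    task.checker,  # None -> default "diff -w" comparison
    input_file, contestant_output, correct_output,
    "Checking solution %s for testcase %d" % (solution_name, tc_num))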
Example #14
def check_sample_cases(task: IOITask, pool: ExecutionPool,
                       interface: IOIUIInterface):
    """
    Check if the sample cases in the statement are valid and the output is
    correct
    """
    # Communication tasks do not have output files
    if task.task_type != TaskType.Batch:
        return

    # without an official solution we cannot generate the output files
    if not task.official_solution:
        return

    inputs = list_files([
        "statement/input*.txt", "statement/{}.input*.txt".format(task.name),
        "testo/input*.txt", "testo/{}.input*.txt".format(task.name)
    ],
                        valid_extensions=[".txt"])
    outputs = list_files([
        "statement/output*.txt", "statement/{}.output*.txt".format(task.name),
        "testo/output*.txt", "testo/{}.output*.txt".format(task.name)
    ],
                         valid_extensions=[".txt"])
    num_to_input = dict()  # type: Dict[int, str]
    num_to_output = dict()  # type: Dict[int, str]
    num_to_input_file = dict()  # type: Dict[int, File]
    num_to_output_file = dict()  # type: Dict[int, File]
    num_to_sol_output_file = dict()  # type: Dict[int, File]
    num_to_validation = dict()  # type: Dict[int, File]

    for infile in inputs:
        match = re.match(r".*input(\d+).txt", infile)
        # invalid sample file format, skip it
        if not match:
            continue
        sample_num = int(match.group(1))
        num_to_input[sample_num] = infile
        num_to_input_file[sample_num] = pool.frontend.provideFile(
            infile, "Sample input {}".format(infile), False)
        # skip the validation if there is no default validator
        if not task.default_val:
            continue
        in_files = {VALIDATION_INPUT_NAME: num_to_input_file[sample_num]}
        validation = Execution("Validation of sample input {}".format(infile),
                               pool,
                               task.default_val.source_file,
                               [VALIDATION_INPUT_NAME, "0"],
                               "sanity-check-validation",
                               {"sample_testcase": sample_num},
                               inputs=in_files)
        num_to_validation[sample_num] = validation.stdout
        _setup_execution_callback(
            interface, validation,
            "Validation of sample input {} failed".format(infile))

    # if the output files have not been generated yet (e.g. when they are
    # just copied), the solution is not prepared yet
    if not task.official_solution.prepared:
        task.official_solution.prepare(pool)

    for outfile in outputs:
        match = re.match(r".*output(\d+).txt", outfile)
        if not match:
            continue
        sample_num = int(match.group(1))
        # skip the output if there is no corresponding input
        if sample_num not in num_to_input:
            continue
        num_to_output[sample_num] = outfile
        num_to_output_file[sample_num] = pool.frontend.provideFile(
            outfile, "Sample output {}".format(outfile), False)
        in_files = dict()
        # if the validator is not present we don't wait for it
        if sample_num in num_to_validation:
            in_files["wait_for_validation"] = num_to_validation[sample_num]
        if task.input_file:
            in_files[task.input_file] = num_to_input_file[sample_num]
            stdin = None
        else:
            stdin = num_to_input_file[sample_num]
        out_files = []
        if task.output_file:
            out_files.append(task.output_file)

        solving = Execution("Solving sample output {}".format(outfile),
                            pool,
                            task.official_solution, [],
                            "sanity-check-solution",
                            {"sample_testcase": sample_num},
                            inputs=in_files,
                            stdin=stdin,
                            outputs=out_files)
        if task.output_file:
            num_to_sol_output_file[sample_num] = solving.output(
                task.output_file)
        else:
            num_to_sol_output_file[sample_num] = solving.stdout

        _setup_execution_callback(
            interface, solving, "Solution of sample input {} failed".format(
                num_to_input[sample_num]))

        check = get_checker_execution(
            pool, task, "", -1, sample_num, task.checker,
            num_to_input_file[sample_num], num_to_output_file[sample_num],
            num_to_sol_output_file[sample_num],
            "Checking sample output {}".format(outfile),
            {"sanity_check": True})

        _setup_checker_callback(
            interface, check,
            "Checking sample output {} failed".format(outfile),
            task.checker is not None)
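
The pairing of sample inputs and outputs is driven purely by the number embedded in the file name. A quick, self-contained illustration of the regex used above:

import re

for name in ["statement/input1.txt", "statement/task.input2.txt",
             "statement/input_extra.txt"]:
    match = re.match(r".*input(\d+)\.txt", name)
    print(name, "->", match.group(1) if match else "skipped")
# statement/input1.txt -> 1
# statement/task.input2.txt -> 2
# statement/input_extra.txt -> skipped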
Example #15
def evaluate_solution(pool: ExecutionPool, task: TerryTask,
                      solution: SourceFile, interface: TerryUIInterface):
    """
    Build the part of the DAG relative to a single solution.
    """
    if pool.config.seed:
        seed = pool.config.seed
    else:
        seed = random.randint(0, 2**31 - 1)
    name = solution.name

    inputs = dict()
    if task.official_solution:
        official = task.official_solution
        inputs[official.exe_name] = official.executable
    generation = Execution(
        "Generation of input for solution {} with seed {}".format(name, seed),
        pool,
        task.generator, [str(seed), "0"],
        "terry-generation", {
            "name": solution.name,
            "seed": seed
        },
        inputs=inputs,
        store_stderr=True)
    input = generation.stdout
    interface.add_generation(name, seed, generation)

    inputs = dict()
    if task.validator:
        val_inputs = dict()
        if task.official_solution:
            official = task.official_solution
            val_inputs[official.exe_name] = official.executable
        validation = Execution(
            "Validation of input for solution {}".format(name),
            pool,
            task.validator, ["0"],
            "terry-validation", {
                "name": solution.name,
                "seed": seed
            },
            stdin=input,
            inputs=val_inputs,
            store_stderr=True)
        inputs["wait_for_validation"] = validation.stdout
        interface.add_validation(name, validation)

    limits = Resources()
    limits.cpu_time = 20
    limits.wall_time = 30
    limits.memory = 1024 * 1024
    solving = Execution("Running solution {}".format(name),
                        pool,
                        solution, [],
                        "terry-evaluation", {
                            "name": solution.name,
                            "seed": seed
                        },
                        limits=limits,
                        cache_on=[CacheMode.ALL],
                        can_exclusive=True,
                        stdin=input,
                        store_stderr=True)
    output = solving.stdout
    interface.add_solving(name, solving)

    inputs = {"input": input, "output": output}
    if task.official_solution:
        official = task.official_solution
        inputs[official.exe_name] = official.executable
    checker = Execution("Checking solution {}".format(name),
                        pool,
                        task.checker, ["input", "output"],
                        "terry-checking", {
                            "name": solution.name,
                            "seed": seed
                        },
                        inputs=inputs,
                        store_stderr=True,
                        store_stdout=True)
    interface.add_checking(name, checker)
Example #16
    def evaluate(
            self, testcase: int, subtask: int, input: File,
            validation: Optional[File],
            correct_output: Optional[File]
    ) -> Tuple[List[Execution], Execution]:
        group = self.solution.pool.frontend.addExecutionGroup(
            "Evaluation of %s on testcase %d" % (self.solution.name, testcase))

        pipes_m_2_sol = []  # type: List[Fifo]
        pipes_sol_2_m = []  # type: List[Fifo]
        pipes_m_2_sol_names = []  # type: List[str]
        pipes_sol_2_m_names = []  # type: List[str]
        for p in range(self.num_processes):
            pipes_m_2_sol.append(group.createFifo())
            pipes_sol_2_m.append(group.createFifo())
            pipes_m_2_sol_names.append("pipe_m_2_sol%d" % p)
            pipes_sol_2_m_names.append("pipe_sol%d_2_m" % p)

        executions = []
        for p, (p_in, p_out, p_in_name, p_out_name) in enumerate(
                zip(pipes_m_2_sol, pipes_sol_2_m, pipes_m_2_sol_names,
                    pipes_sol_2_m_names)):
            limits = Resources()
            limits.cpu_time = self.task.time_limit
            limits.wall_time = self.task.time_limit * 1.5
            limits.memory = self.task.memory_limit_kb
            inputs = dict()
            if validation:
                inputs["tm_wait_validation"] = validation
            inputs[p_in_name] = p_in
            inputs[p_out_name] = p_out
            exec = Execution(
                "Evaluation of %s (process %d) on testcase %d" %
                (self.solution.name, p, testcase),
                self.solution.pool,
                self.solution,
                [p_out_name, p_in_name, str(p)],
                "evaluation", {
                    "name": self.solution.name,
                    "process": p + 1,
                    "num_processes": self.num_processes,
                    "subtask": subtask,
                    "testcase": testcase
                },
                group=group,
                extra_time=self.config.extra_time,
                limits=limits,
                inputs=inputs)
            executions.append(exec)

        args = []
        for p_in_name, p_out_name in zip(pipes_sol_2_m_names,
                                         pipes_m_2_sol_names):
            args += [p_out_name, p_in_name]

        limits = Resources()
        limits.cpu_time = self.task.time_limit * self.num_processes
        limits.wall_time = limits.cpu_time * 1.5
        inputs = dict()
        for p_in, p_out, p_in_name, p_out_name in zip(pipes_m_2_sol,
                                                      pipes_sol_2_m,
                                                      pipes_m_2_sol_names,
                                                      pipes_sol_2_m_names):
            inputs[p_in_name] = p_in
            inputs[p_out_name] = p_out
        if self.task.input_file:
            inputs[self.task.input_file] = input
            stdin = None
        else:
            stdin = input
        outputs = []
        if self.task.output_file:
            outputs.append(self.task.output_file)

        manager = Execution(
            "Evaluation of %s (manager process) on testcase %d" %
            (self.solution.name, testcase),
            self.manager.pool,
            self.manager,
            args,
            "evaluation", {
                "name": self.solution.name,
                "process": 0,
                "num_processes": self.num_processes,
                "subtask": subtask,
                "testcase": testcase
            },
            group=group,
            limits=limits,
            inputs=inputs,
            outputs=outputs,
            stdin=stdin,
            can_exclusive=True,
            cache_on=[CacheMode.ALL],
            store_stdout=True,
            store_stderr=True)

        return executions, manager
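
Note the argument convention shared by both executions above: each process receives the FIFO it writes to first and the FIFO it reads from second. A solution process gets pipe_sol%d_2_m, pipe_m_2_sol%d and its process index, while the manager gets the matching pipe_m_2_sol/pipe_sol_2_m pair for every process.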
Example #17
def fuzz_checker(config: Config):
    in_file, out_file = config.fuzz_checker
    if not os.path.exists(in_file):
        raise ValueError("The input file does not exist")
    if not os.path.exists(out_file):
        raise ValueError("The output file does not exist")

    from task_maker.formats.ioi_format import get_task
    task = get_task(config)
    if not task.checker:
        raise ValueError("This task does not have a checker")

    results_dir = os.path.join(config.cwd, "fuzz_checker_" + task.name)
    os.makedirs(results_dir, exist_ok=True)
    shutil.copy(in_file, os.path.join(results_dir, "input.txt"))
    shutil.copy(out_file, os.path.join(results_dir, "output.txt"))

    ui_printer = UIPrinter(Printer(), False)
    state = FuzzCheckerState()
    while True:
        state.batch_num += 1
        frontend = get_frontend(config)
        pool = ExecutionPool(config, frontend, ui_printer)

        input = frontend.provideFile(in_file, "Input file")
        output = frontend.provideFile(out_file, "Output file")
        task.checker.unprepare()
        task.checker.prepare(pool)

        for num in range(BATCH_SIZE):
            seed = random.randint(0, 10 ** 9)
            gen = Execution(
                "Generation of output %d of batch %d" % (num, state.batch_num),
                pool,
                "radamsa", ["--seed", str(seed), "-"],
                "fuzz-checker-radamsa",
                ui_print_data={
                    "batch": state.batch_num,
                    "num": num,
                    "seed": seed
                },
                cache_on=[],
                stdin=output,
                store_stdout_bytes=True)
            fuzz_output = gen.stdout
            check = Execution(
                "Checking output %d of batch %d" % (num, state.batch_num),
                pool,
                task.checker, ["input", "output", "fuzz"],
                "fuzz-checker-checker",
                ui_print_data={
                    "batch": state.batch_num,
                    "num": num,
                    "seed": seed
                },
                cache_on=[],
                inputs={
                    "input": input,
                    "output": output,
                    "fuzz": fuzz_output
                },
                store_stdout_bytes=True,
                store_stderr_bytes=True)
            state.bind(results_dir, gen, check)

        def compilation_on_done(res: Result):
            if res.status != ResultStatus.SUCCESS:
                print("Failed to compile the checker")
                print(task.checker.compilation.stderr_content)
                pool.stop()

        if task.checker.compilation:
            task.checker.compilation.bind(compilation_on_done)

        pool.start()
        print(state)
        if pool.stopped:
            return state.num_fails
Example #18
def generate_inputs(
    pool: ExecutionPool, task: IOITask, interface: IOIUIInterface
) -> Tuple[Dict[Tuple[int, int], File], Dict[Tuple[int, int], File],
           Dict[Tuple[int, int], File]]:
    """
    Create the part of the DAG responsible for the input and output files.
    Returns 3 dicts: one for inputs, one for outputs and one for validations.
    Each dict maps (subtask number, testcase number) -> File.
    """
    def add_non_solution(source: SourceFile):
        if not source.prepared:
            source.prepare(pool)
            interface.add_non_solution(source)

    inputs = dict()  # type: Dict[Tuple[int, int], File]
    outputs = dict()  # type: Dict[Tuple[int, int], File]
    validations = dict()  # type: Dict[Tuple[int, int], File]
    for st_num, subtask in task.subtasks.items():
        for tc_num, testcase in subtask.testcases.items():
            testcase_id = (st_num, tc_num)

            if testcase.validator:
                add_non_solution(testcase.validator.source_file)

            # static input file
            if testcase.input_file:
                try:
                    inputs[testcase_id] = pool.frontend.provideFile(
                        testcase.input_file, "Static input %d" % tc_num, False)

                    if testcase.validator:
                        val = Execution("Validation of input %d" % tc_num,
                                        pool,
                                        testcase.validator.source_file,
                                        testcase.validator.get_args(
                                            testcase, subtask, tc_num,
                                            st_num + 1),
                                        "validation", {
                                            "subtask": st_num,
                                            "testcase": tc_num
                                        },
                                        inputs={
                                            VALIDATION_INPUT_NAME:
                                            inputs[testcase_id]
                                        },
                                        store_stderr=True)
                        validations[testcase_id] = val.stdout

                        interface.add_validation(st_num, tc_num, val)
                except RuntimeError as ex:
                    interface.add_error(str(ex))
                    interface.subtasks[st_num][
                        tc_num].status = TestcaseGenerationStatus.FAILURE
                    continue
            # generate input file
            else:
                add_non_solution(testcase.generator.source_file)

                deps = dict()
                for dep in testcase.extra_deps:
                    deps[dep.name] = pool.frontend.provideFile(
                        dep.path, dep.path, False)
                gen = Execution("Generation of input %d" % tc_num,
                                pool,
                                testcase.generator.source_file,
                                testcase.generator_args,
                                "generation", {
                                    "subtask": st_num,
                                    "testcase": tc_num
                                },
                                inputs=deps,
                                store_stderr=True)
                inputs[testcase_id] = gen.stdout

                interface.add_generation(st_num, tc_num, gen)

                val = Execution(
                    "Validation of input %d" % tc_num,
                    pool,
                    testcase.validator.source_file,
                    testcase.validator.get_args(testcase, subtask, tc_num,
                                                st_num + 1),
                    "validation", {
                        "subtask": st_num,
                        "testcase": tc_num
                    },
                    inputs={VALIDATION_INPUT_NAME: inputs[testcase_id]},
                    store_stderr=True)
                validations[testcase_id] = val.stdout

                interface.add_validation(st_num, tc_num, val)

            if testcase.write_input_to and not pool.config.dry_run:
                inputs[testcase_id].getContentsToFile(testcase.write_input_to)

            if task.task_type == TaskType.Batch:
                # static output file
                if testcase.output_file:
                    outputs[testcase_id] = pool.frontend.provideFile(
                        testcase.output_file, "Static output %d" % tc_num,
                        False)
                else:
                    add_non_solution(task.official_solution)
                    deps = {"wait_for_validation": validations[testcase_id]}
                    if task.input_file:
                        deps[task.input_file] = inputs[testcase_id]
                        stdin = None
                    else:
                        stdin = inputs[testcase_id]
                    outs = []
                    if task.output_file:
                        outs.append(task.output_file)

                    sol = Execution("Generation of output %d" % tc_num,
                                    pool,
                                    task.official_solution, [],
                                    "solution", {
                                        "subtask": st_num,
                                        "testcase": tc_num
                                    },
                                    inputs=deps,
                                    outputs=outs,
                                    stdin=stdin,
                                    store_stderr=True)
                    if task.output_file:
                        outputs[testcase_id] = sol.output(task.output_file)
                    else:
                        outputs[testcase_id] = sol.stdout

                    interface.add_solving(st_num, tc_num, sol)

                if testcase.write_output_to and not pool.config.dry_run:
                    outputs[testcase_id].getContentsToFile(
                        testcase.write_output_to, True, True)
    if task.checker:
        add_non_solution(task.checker)
    return inputs, outputs, validations
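
A hedged usage sketch of the returned mappings; pool, task and interface are assumed to be already set up.

# Hypothetical usage: look up the File handles by (subtask, testcase).
inputs, outputs, validations = generate_inputs(pool, task, interface)
first_input = inputs[(0, 0)]      # input of subtask 0, testcase 0
has_output = (0, 0) in outputs    # filled only for Batch tasks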
Example #19
    def compile_booklet(
        pool: ExecutionPool,
        statements: List["OIITexStatement"],
        language: Optional[str] = None
    ) -> Tuple[Execution, File, List[StatementDepInfo]]:
        inputs = dict()
        other_executions = []  # type: List[StatementDepInfo]

        data_dir = os.path.join(get_template_dir(), "data")
        template_files = set(get_files(data_dir))

        packages = set()  # type: Set[str]
        statement_files = []  # type: List[str]
        task_names = list()  # type: List[str]
        for statement in statements:
            task_name = statement.task.name
            task_names.append(task_name)
            with open(statement.path, "r") as f:
                content = f.read()
            tex = extract_packages(content)
            packages |= set(tex.packages)
            task_tex_file = build_task_tex_file(pool.config, statement.task,
                                                tex.content, language)
            params = get_template_parameters(pool.config, statement.task,
                                             language)
            deps = get_dependencies(os.path.dirname(statement.path),
                                    task_tex_file, params.get("logo"))

            file = pool.frontend.provideFileContent(
                task_tex_file, "Statement file %s" % statement.name)

            inputs[os.path.join(task_name, statement.name)] = file
            statement_files.append(os.path.join(task_name, statement.name))

            for dep in deps:
                if os.path.join(data_dir, dep.name) in template_files:
                    # skip the template files
                    continue
                # non-asy files, like images or other tex files, are just
                # copied into the sandbox
                if os.path.splitext(dep.path)[1] != ".asy":
                    file = pool.frontend.provideFile(
                        dep.path, "Statement dependency %s" % dep.name)
                    if os.path.join(task_name, dep.name) not in inputs:
                        inputs[os.path.join(task_name, dep.name)] = file
                    continue
                # the Asymptote files need to be compiled into pdf and then
                # cropped
                name = dep.name.replace(".asy", ".pdf")
                # compile the asy file like a normal source file
                source_file = SourceFile.from_file(dep.path, dep.name, False,
                                                   None, Arch.DEFAULT, dict())
                source_file.prepare(pool)
                other_executions.append(
                    StatementDepInfo(dep.name, source_file.compilation))
                # crop the pdf using pdfcrop
                crop = Execution("Crop compiled asy file %s" % dep.name,
                                 pool,
                                 "pdfcrop", ["file.pdf"],
                                 "asy-cropping", {"file": dep.name},
                                 inputs={"file.pdf": source_file.executable},
                                 outputs=["file-crop.pdf"])
                other_executions.append(
                    StatementDepInfo("Crop %s" % source_file.exe_name, crop))
                inputs[os.path.join(task_name,
                                    name)] = crop.output("file-crop.pdf")

        booklet_tex = build_contest_tex_file(pool.config, packages,
                                             statement_files, language)
        booklet = pool.frontend.provideFileContent(booklet_tex,
                                                   "Booklet source file")
        inputs["booklet.tex"] = booklet

        # add the template files to the sandbox
        for path in template_files:
            name = path[len(data_dir) + 1:]
            file = pool.frontend.provideFile(path, "Template file %s" % name)
            inputs[name] = file

        # TODO: eventually invoke latexmk directly, once setting env vars
        #  is supported
        compilation = Execution(
            "Compilation of booklet",
            pool,
            "env", [
                "TEXINPUTS=.:%s:" % ":".join(task_names), "latexmk", "-f",
                "-interaction=nonstopmode", "-pdf", "booklet.tex"
            ],
            "statement-compilation", {"language": language},
            inputs=inputs,
            outputs=["booklet.pdf"])
        pdf_file = compilation.output("booklet.pdf")

        return compilation, pdf_file, other_executions
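
The env wrapper above exists only to set TEXINPUTS (see the TODO): inside the sandbox the command is equivalent to TEXINPUTS=.:taskA:taskB: latexmk -f -interaction=nonstopmode -pdf booklet.tex, with one entry per task directory.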
Example #20
class SourceFile:
    """
    A SourceFile holds a reference to a source file; this class manages its
    compilation and execution.
    """
    @staticmethod
    def from_file(path: str, unit_name: str, copy_executable: bool,
                  write_to: Optional[str], target_arch: Arch,
                  grader_map: Dict[Language, GraderInfo]) -> "SourceFile":
        """
        Handy constructor to build a SourceFile
        :param path: path to the source file
        :param unit_name: name of the unit of this source file. Usually the name
            of the task
        :param copy_executable: Whether to copy the executable into write_to
        :param write_to: Where to copy the executable, if copy_executable
        :param target_arch: Architecture to target the build
        :param grader_map: Map with the graders for all the languages
        """
        if copy_executable and not write_to:
            raise ValueError(
                "Asked to copy the executable but not specified where")
        old_path = path
        if not os.path.exists(path):
            path = find_executable(path)
        if not path:
            raise ValueError("Cannot find %s" % old_path)

        language = LanguageManager.from_file(path)
        dependencies = language.get_dependencies(path)
        grader = grader_map.get(language)
        exe_name = language.exe_name(path, write_to)
        source_file = SourceFile(path, unit_name, exe_name, dependencies,
                                 language, copy_executable and write_to,
                                 target_arch, grader)
        if not language.need_compilation:
            if not is_executable(source_file.path):
                raise ValueError("The file %s is not an executable. "
                                 "Please check the shebang (#!)" % path)
        return source_file

    def __init__(self, path: str, unit_name: str, exe_name: str,
                 dependencies: List[Dependency], language: Language,
                 write_bin_to: Optional[str], target_arch: Arch,
                 grader: Optional["GraderInfo"]):
        self.path = path
        self.unit_name = unit_name
        self.dependencies = dependencies
        self.language = language
        self.write_bin_to = write_bin_to
        self.target_arch = target_arch
        self.grader = grader
        self.name = os.path.basename(path)
        self.exe_name = exe_name
        self.pool = None  # type: Optional[ExecutionPool]
        # set only after `prepare`
        self.executable = None  # type: Optional[File]
        self.compilation = None  # type: Optional[Execution]
        self.compilation_stderr = None  # type: Optional[File]
        self.compilation_stdout = None  # type: Optional[File]

    @property
    def prepared(self) -> bool:
        return self.pool is not None

    def unprepare(self):
        """
        Unprepare the source file. Useful for recompiling it in a different
        execution pool.
        """
        self.pool = None
        self.executable = None
        self.compilation = None
        self.compilation_stderr = None
        self.compilation_stdout = None

    def prepare(self, pool: ExecutionPool):
        """
        Prepare the source file for execution, compiling the source if
        needed. After this call self.executable will be available. If the
        source needs compilation, compilation_stderr and compilation_stdout
        will be available too.
        """
        if self.prepared:
            return
        self.pool = pool
        if self.language.need_compilation:
            self._compile()
        else:
            self._not_compile()
        if self.write_bin_to and not self.pool.config.dry_run:
            self.executable.getContentsToFile(self.write_bin_to, True, True)

    def _compile(self):
        compilation_files = [self.name]
        if self.grader:
            compilation_files += [d.name for d in self.grader.files]

        cmd_type, cmd = self.language.get_compilation_command(
            compilation_files, self.exe_name, self.unit_name, True,
            self.target_arch)

        if cmd_type != CommandType.SYSTEM:
            raise ValueError("Local file compilers are not supported yet")
        if not cmd:
            raise ValueError("Unexpected empty compiler command")

        if self.language.need_unit_name:
            source_name = self.unit_name + self.language.source_extensions[0]
        else:
            source_name = self.name
        inputs = {
            source_name:
            self.pool.frontend.provideFile(self.path,
                                           "Source file for " + self.name,
                                           False)
        }
        for dep in self.dependencies:
            inputs[dep.name] = self.pool.frontend.provideFile(
                dep.path, dep.path, False)
        if self.grader:
            for dep in self.grader.files:
                inputs[dep.name] = self.pool.frontend.provideFile(
                    dep.path, dep.path, False)
        self.compilation = Execution("Compilation of %s" % self.name,
                                     self.pool,
                                     cmd[0],
                                     cmd[1:],
                                     "compilation", {
                                         "file": self.name,
                                         "path": self.path
                                     },
                                     inputs=inputs,
                                     outputs=[(self.exe_name, True)],
                                     stdout_limit=COMPILATION_STDERR_LIMIT,
                                     stderr_limit=COMPILATION_STDERR_LIMIT,
                                     store_stderr=True,
                                     store_stdout=True)
        self.executable = self.compilation.output(self.exe_name)

    def _not_compile(self):
        self.executable = self.pool.frontend.provideFile(
            self.path, "Source file for " + self.name, True)

    def __repr__(self):
        return "<SourceFile path=%s language=%s>" % (self.path, self.language)