Пример #1
0
def _setup_execution_callback(interface: IOIUIInterface, execution: Execution,
                              error_message: str):
    def on_done(result: Result):
        if result.status != ResultStatus.SUCCESS:
            interface.add_error(error_message)

    execution.bind(on_done)
Пример #2
0
    def add_solving(self, solution: str, solving: Execution):
        """
        Begin monitoring the evaluation of a solution.

        solution -- name of the solution being evaluated
        solving  -- the Execution that runs the solution
        """
        info = self.solutions_info[solution]
        info.solution = solving

        def started():
            info.status = SolutionStatus.SOLVING

        def finished(result: Result):
            if result.status != ResultStatus.SUCCESS:
                self.add_error("Solution {} failed".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Solution failed: " + result_to_str(result)
            else:
                info.status = SolutionStatus.SOLVED

        solving.bind(finished, started)
Пример #3
0
    def add_solving(self, subtask: int, testcase: int, solving: Execution):
        """
        Begin monitoring the run of the official solution on a testcase.

        subtask  -- index of the subtask the testcase belongs to
        testcase -- index of the testcase being solved
        solving  -- the Execution producing the testcase's output
        """
        status = self.subtasks[subtask][testcase]
        status.solution = solving

        def started():
            status.status = TestcaseGenerationStatus.SOLVING

        def finished(result: Result):
            if result.status != ResultStatus.SUCCESS:
                self.add_error("Failed to generate output of testcase #%d" %
                               testcase)
                status.status = TestcaseGenerationStatus.FAILURE
            else:
                status.status = TestcaseGenerationStatus.DONE

        solving.bind(finished, started)
Пример #4
0
    def add_validation(self, subtask: int, testcase: int,
                       validation: Execution):
        """
        Begin monitoring the validation of a testcase.

        subtask    -- index of the subtask the testcase belongs to
        testcase   -- index of the testcase being validated
        validation -- the Execution running the validator
        """
        status = self.subtasks[subtask][testcase]
        status.validation = validation

        def started():
            status.status = TestcaseGenerationStatus.VALIDATING

        def finished(result: Result):
            if result.status != ResultStatus.SUCCESS:
                self.add_error("Failed to validate testcase #%d" % testcase)
                status.status = TestcaseGenerationStatus.FAILURE
            else:
                status.status = TestcaseGenerationStatus.VALIDATED

        validation.bind(finished, started)
Пример #5
0
    def bind(self, results_dir: str, generator: Execution, checker: Execution):
        """
        Attach the fuzzing callbacks to the generator and checker executions.

        results_dir -- directory where failing cases are archived
        generator   -- execution producing the fuzzed input
        checker     -- execution validating the fuzzed input
        """

        def gen_on_done(res: Result):
            # The generator must always succeed; a failure here is a fatal
            # error in the fuzzing setup, not a finding.
            if res.status != ResultStatus.SUCCESS:
                raise RuntimeError("radamsa failed! %s %s" %
                                   (generator.name, result_to_dict(res)))
            self.num_tests += 1

        def check_on_done(res: Result):
            failed = res.status != ResultStatus.SUCCESS
            out = checker.stdout_content_bytes
            # The checker is expected to print a float score in [0, 1];
            # anything unparsable or out of range is a failure.
            score = -1.0
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to the failures the conversion can
            # actually raise (bad bytes, bad float syntax, missing stdout).
            try:
                score = float(out.decode())
            except (UnicodeDecodeError, ValueError, AttributeError):
                failed = True
            if not 0.0 <= score <= 1.0:
                failed = True

            if not failed:
                self.num_successes += 1
                return

            # Archive everything needed to reproduce the failing case.
            self.num_fails += 1
            dest_dir = os.path.join(results_dir, "fail%d" % self.num_fails)
            os.makedirs(dest_dir, exist_ok=True)
            with open(os.path.join(dest_dir, "fuzz_output"), "wb") as f:
                f.write(generator.stdout_content_bytes)
            with open(os.path.join(dest_dir, "checker_stdout"), "wb") as f:
                f.write(checker.stdout_content_bytes)
            with open(os.path.join(dest_dir, "checker_stderr"), "wb") as f:
                f.write(checker.stderr_content_bytes)
            with open(os.path.join(dest_dir, "data.json"), "w") as f:
                data = {
                    "data": checker.ui_print_data,
                    "result": result_to_dict(checker.result)
                }
                f.write(json.dumps(data, indent=4))

        generator.bind(gen_on_done)
        checker.bind(check_on_done)
Пример #6
0
def _setup_checker_callback(interface: IOIUIInterface, checking: Execution,
                            error_message: str, custom_checker: bool):
    def on_done(result: Result):
        if result.status != ResultStatus.SUCCESS:
            interface.add_error(error_message)
        if not custom_checker:
            return
        stdout = checking.stdout_content
        try:
            score = float(stdout)
        except ValueError:
            interface.add_error(error_message +
                                ": invalid score: {}".format(stdout))
            return
        if not 0.0 <= score <= 1.0:
            interface.add_error(error_message +
                                ": invalid score: {}".format(stdout))
            return

    checking.bind(on_done)
Пример #7
0
    def add_validation(self, solution: str, validation: Execution):
        """
        Begin monitoring the validation of a testcase.

        solution   -- name of the solution the input belongs to
        validation -- the Execution running the validator
        """
        info = self.solutions_info[solution]
        info.validation = validation

        def started():
            info.status = SolutionStatus.VALIDATING

        def finished(result: Result):
            if result.status != ResultStatus.SUCCESS:
                self.add_error(
                    "Failed to validate input for {}".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Validator failed: " + result_to_str(result)
            else:
                info.status = SolutionStatus.VALIDATED

        validation.bind(finished, started)
Пример #8
0
    def add_generation(self, solution: str, seed: int, generation: Execution):
        """
        Begin monitoring the generation of a testcase.

        solution   -- name of the solution the input is generated for
        seed       -- seed passed to the generator
        generation -- the Execution running the generator
        """
        info = self.solutions_info[solution]
        info.seed = seed
        info.generation = generation

        def started():
            info.status = SolutionStatus.GENERATING

        def finished(result: Result):
            if result.status != ResultStatus.SUCCESS:
                self.add_error(
                    "Failed to generate input for {} with seed {}".format(
                        solution, seed))
                info.status = SolutionStatus.FAILED
                info.message = "Generator failed: " + result_to_str(result)
            else:
                info.status = SolutionStatus.GENERATED

        generation.bind(finished, started)
Пример #9
0
    def add_checking(self, solution: str, checking: Execution):
        """
        Begin monitoring the checker run for a solution.

        solution -- name of the solution whose output is checked
        checking -- the Execution running the checker
        """
        info = self.solutions_info[solution]
        info.checking = checking

        def started():
            info.status = SolutionStatus.CHECKING

        def finished(result: Result):
            # The score is computed and printed even when the checker
            # fails, mirroring the original callback's ordering.
            self._compute_score(solution, checking.stdout_content)
            self.ui_printer.terry_solution_outcome(solution, info)
            if result.status != ResultStatus.SUCCESS:
                self.add_error(
                    "Checker failed on output of solution {}".format(solution))
                info.status = SolutionStatus.FAILED
                info.message = "Checker failed: " + result_to_str(result)
            else:
                info.status = SolutionStatus.DONE

        checking.bind(finished, started)
Пример #10
0
    def add_evaluate_checking(self, subtask: int, testcase: int, solution: str,
                              checking: Execution):
        """
        Start tracking the checking of a solution in a testcase

        subtask  -- index of the subtask the testcase belongs to
        testcase -- index of the testcase being checked
        solution -- name of the solution whose output is being checked
        checking -- the Execution running the checker
        """
        # Truthy when the task ships its own checker; selects which of the
        # two result paths below is used.
        has_custom_checker = self.task.checker
        # Accumulates the custom checker's result/stdout/stderr; its callback
        # fires (on_checked) once everything it needs has been set.
        custom_checker_state = CustomCheckerState(solution)

        def on_start():
            self.testing[solution].testcase_results[subtask][
                testcase].status = TestcaseSolutionStatus.CHECKING

        def on_done(result: Result):
            if has_custom_checker:
                # Feed the state object; the final outcome is published later
                # via on_checked, not here.
                custom_checker_state.set_result(result)
                if result.status != ResultStatus.SUCCESS:
                    self.add_error(
                        "Checker failed for testcase #%d for solution %s" %
                        (testcase, solution))
                custom_checker_state.set_stdout(checking.stdout_content)
                custom_checker_state.set_stderr(checking.stderr_content)
            else:
                # Built-in checker: the result alone determines the outcome,
                # so record and print it immediately.
                self.testing[solution].update_default_check_result(
                    subtask, testcase, result)
                self.ui_printer.testcase_outcome(
                    solution, testcase, subtask,
                    self.testing[solution].testcase_results[subtask][testcase])

        def on_checked():
            # Invoked by custom_checker_state once result, stdout and stderr
            # have all been provided.
            self.testing[solution].update_custom_check_result(
                subtask, testcase, custom_checker_state)
            self.ui_printer.testcase_outcome(
                solution, testcase, subtask,
                self.testing[solution].testcase_results[subtask][testcase])

        if has_custom_checker:
            custom_checker_state.set_callback(on_checked)
        checking.bind(on_done, on_start)