示例#1
0
    def test_get_submissions_operations_no_operations(self):
        """Test for submissions without operations to do."""
        # A submission for a different contest.
        self.add_submission()

        # A submission that failed compilation.
        self.add_submission_with_results(self.tasks[0], self.participation,
                                         False)

        # A submission completely evaluated.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        for result in results:
            # Python 3: `iteritems` no longer exists; the codename was
            # unused anyway, so iterate the values directly.
            for testcase in result.dataset.testcases.values():
                self.add_evaluation(result, testcase)

        # A submission reaching maximum tries for compilation
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        for result in results:
            result.compilation_tries = 25

        # A submission reaching maximum tries for evaluation
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        for result in results:
            result.evaluation_tries = 25

        self.session.flush()
        # None of the submissions above should yield any operation.
        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            set())
示例#2
0
    def test_get_submissions_operations_no_operations(self):
        """Test for submissions without operations to do."""
        # Submission belonging to another contest: never considered.
        self.add_submission()

        # Submission whose compilation failed: nothing left to do.
        self.add_submission_with_results(
            self.tasks[0], self.participation, False)

        # Fully evaluated submission: every testcase already has an
        # evaluation attached.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        for sub_result in results:
            for tc in sub_result.dataset.testcases.values():
                self.add_evaluation(sub_result, tc)

        # Submission that exhausted its compilation attempts.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        for sub_result in results:
            sub_result.compilation_tries = 25

        # Submission that exhausted its evaluation attempts.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        for sub_result in results:
            sub_result.evaluation_tries = 25

        self.session.flush()
        actual = set(
            get_submissions_operations(self.session, self.contest.id))
        self.assertEqual(actual, set())
示例#3
0
    def test_get_submissions_operations_without_results(self):
        """Test for a submission without submission results."""
        submission = self.add_submission(self.tasks[0], self.participation)
        self.session.flush()

        # One compilation operation is expected per judged dataset.
        expected = {
            self.submission_compilation_operation(submission, dataset)
            for dataset in submission.task.datasets
            if self.to_judge(dataset)}

        actual = set(
            get_submissions_operations(self.session, self.contest.id))
        self.assertEqual(actual, expected)
示例#4
0
    def test_get_submissions_operations_without_results(self):
        """Test for a submission without submission results."""
        submission = self.add_submission(self.tasks[0], self.participation)
        self.session.flush()

        # Every dataset still to be judged must trigger a compilation.
        expected_operations = set()
        for dataset in submission.task.datasets:
            if self.to_judge(dataset):
                expected_operations.add(
                    self.submission_compilation_operation(
                        submission, dataset))

        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            expected_operations)
示例#5
0
    def test_get_submissions_operations_to_evaluate(self):
        """Test for a compiled submission."""
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        self.session.flush()

        # One evaluation operation per (judged dataset, testcase codename).
        expected = set()
        for result in results:
            if not self.to_judge(result.dataset):
                continue
            for codename in result.dataset.testcases:
                expected.add(
                    self.submission_evaluation_operation(result, codename))

        actual = set(
            get_submissions_operations(self.session, self.contest.id))
        self.assertEqual(actual, expected)
示例#6
0
    def test_get_submissions_operations_to_evaluate(self):
        """Test for a compiled submission."""
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        self.session.flush()

        # Evaluations are expected for all testcases of judged datasets.
        expected_operations = {
            self.submission_evaluation_operation(result, codename)
            for result in results
            if self.to_judge(result.dataset)
            for codename in result.dataset.testcases}

        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            expected_operations)
示例#7
0
    def test_get_submissions_operations_with_results_second_try(self):
        """Test for a submission with submission results."""
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        for sub_result in results:
            sub_result.compilation_tries = 1
        self.session.flush()

        # A previous failed attempt should still yield a compilation
        # operation for every judged dataset.
        expected = {
            self.submission_compilation_operation(
                submission, sub_result.dataset, sub_result)
            for sub_result in results
            if self.to_judge(sub_result.dataset)}

        actual = set(
            get_submissions_operations(self.session, self.contest.id))
        self.assertEqual(actual, expected)
示例#8
0
    def test_get_submissions_operations_with_results_second_try(self):
        """Test for a submission with submission results."""
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        for result in results:
            result.compilation_tries = 1
        self.session.flush()

        # Compilation must be retried on every dataset still to judge.
        expected_operations = set()
        for result in results:
            if self.to_judge(result.dataset):
                expected_operations.add(
                    self.submission_compilation_operation(
                        submission, result.dataset, result))

        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            expected_operations)
示例#9
0
    def test_get_submissions_operations_mixed(self):
        """Test with many different submission statuses."""
        expected = set()

        # Submission in another contest: ignored.
        self.add_submission()

        # Submission whose compilation failed: ignored.
        self.add_submission_with_results(
            self.tasks[0], self.participation, False)

        # Submission with no results yet: compile on judged datasets.
        submission = self.add_submission(self.tasks[0], self.participation)
        self.session.flush()
        expected |= {
            self.submission_compilation_operation(submission, dataset)
            for dataset in submission.task.datasets
            if self.to_judge(dataset)}

        # Submission with results but not compiled: compile again.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        self.session.flush()
        expected |= {
            self.submission_compilation_operation(submission, dataset)
            for dataset in submission.task.datasets
            if self.to_judge(dataset)}

        # Compiled submission: evaluate every testcase.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        self.session.flush()
        expected |= {
            self.submission_evaluation_operation(result, codename)
            for result in results
            if self.to_judge(result.dataset)
            for codename in result.dataset.testcases}

        actual = set(
            get_submissions_operations(self.session, self.contest.id))
        self.assertEqual(actual, expected)
示例#10
0
    def test_get_submissions_operations_partially_evaluate(self):
        """Test for a submission with some evaluation present."""
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        evaluated_codenames = set()
        for result in results:
            # Python 3: dict views are not subscriptable, so
            # `.keys()[0]` raises TypeError; pick an arbitrary codename
            # with next(iter(...)) instead.
            evaluated_codename = next(iter(result.dataset.testcases))
            self.add_evaluation(
                result, result.dataset.testcases[evaluated_codename])
            evaluated_codenames.add(evaluated_codename)
        self.session.flush()

        # Only the testcases still missing an evaluation should produce
        # operations, on judged datasets.
        expected_operations = set(
            self.submission_evaluation_operation(result, codename)
            for result in results if self.to_judge(result.dataset)
            for codename in result.dataset.testcases
            if codename not in evaluated_codenames)

        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            expected_operations)
示例#11
0
    def test_get_submissions_operations_mixed(self):
        """Test with many different submission statuses."""
        expected_operations = set()

        # A submission for a different contest contributes nothing.
        self.add_submission()

        # A failed compilation contributes nothing.
        self.add_submission_with_results(
            self.tasks[0], self.participation, False)

        # No results yet: one compilation per judged dataset.
        submission = self.add_submission(self.tasks[0], self.participation)
        self.session.flush()
        for dataset in submission.task.datasets:
            if self.to_judge(dataset):
                expected_operations.add(
                    self.submission_compilation_operation(
                        submission, dataset))

        # Results present but not compiled: one compilation per judged
        # dataset.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation)
        self.session.flush()
        for dataset in submission.task.datasets:
            if self.to_judge(dataset):
                expected_operations.add(
                    self.submission_compilation_operation(
                        submission, dataset))

        # Compiled: one evaluation per testcase of judged datasets.
        submission, results = self.add_submission_with_results(
            self.tasks[0], self.participation, True)
        self.session.flush()
        for result in results:
            if self.to_judge(result.dataset):
                for codename in result.dataset.testcases:
                    expected_operations.add(
                        self.submission_evaluation_operation(
                            result, codename))

        self.assertEqual(
            set(get_submissions_operations(self.session, self.contest.id)),
            expected_operations)