def test_judging(self):
    """End-to-end judging test: submit a solution through the web form and
    verify the per-test reports, the submission score and the aggregated
    contest result.
    """
    self.assertTrue(self.client.login(username='******'))
    contest = Contest.objects.get()
    url = reverse('submit', kwargs={'contest_id': contest.id})

    # Show submission form
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertIn(
        'contests/submit.html',
        [getattr(t, 'name', None) for t in response.templates])
    form = response.context['form']
    self.assertEqual(len(form.fields['problem_instance_id'].choices), 1)
    pi_id = form.fields['problem_instance_id'].choices[0][0]

    # Submit.  Use a context manager so the test file handle is closed
    # even if the POST raises — the original left the file open (leak).
    filename = get_test_filename('sum-various-results.cpp')
    with open(filename, 'rb') as submission_file:
        response = self.client.post(url, {
            'problem_instance_id': pi_id,
            'file': submission_file,
        })
    self.assertEqual(response.status_code, 302)

    # One submission, six tests: 4 OK, 1 WA, 1 RE.
    self.assertEqual(Submission.objects.count(), 1)
    self.assertEqual(TestReport.objects.count(), 6)
    self.assertEqual(TestReport.objects.filter(status='OK').count(), 4)
    self.assertEqual(TestReport.objects.filter(status='WA').count(), 1)
    self.assertEqual(TestReport.objects.filter(status='RE').count(), 1)
    submission = Submission.objects.get()
    self.assertEqual(submission.status, 'INI_OK')
    self.assertEqual(submission.score, IntegerScore(34))

    # The user's aggregated result for the contest mirrors the submission.
    urc = UserResultForContest.objects.get()
    self.assertEqual(urc.score, IntegerScore(34))
def threshold_linear_test_scorer(test, result):
    """Score a single test run with a threshold-linear rule.

    Full (percentage-scaled) score when the run took at most half of the
    time limit, then linearly decreasing down to 1 point just below the
    limit.  Returns ``(score, test_max_score, status)``.
    """
    time_limit = test.get('exec_time_limit', 0)
    time_used = result.get('time_used', 0)
    status = result['result_code']
    percentage = result.get('result_percentage', 100)
    max_score = int(ceil(percentage * test['max_score'] / 100.))
    test_max_score = IntegerScore(test['max_score'])

    # Failed runs score nothing; without a time limit the full score stands.
    if status != 'OK':
        return IntegerScore(0), test_max_score, status
    if not time_limit:
        return IntegerScore(max_score), test_max_score, status

    if time_used > time_limit:
        score, status = 0, 'TLE'
    elif max_score == 0:
        score = 0
    elif time_used <= time_limit / 2.:
        score = max_score
    else:
        # Linear interpolation over the second half of the limit,
        # never dropping below 1 point for an in-time OK run.
        score = 1 + int((max_score - 1)
                        * ((time_limit - time_used) / (time_limit / 2.)))
    return IntegerScore(score), test_max_score, status
def _score_question(submission, submission_report, question, problem_instance):
    """Grade a single quiz question of ``submission``.

    Returns ``(awarded_points, ignore_question)``; a question with no
    submitted answer at all is ignored (no report saved, 0 points).
    """
    max_points = question.points
    report = QuestionReport(
        submission_report=submission_report,
        question=question,
        question_max_score=max_points,
        score=IntegerScore(0),
    )

    correct = False
    answered = False  # becomes True once any submitted answer is found
    if question.is_text_input:
        text_answers = submission.quizsubmissiontextanswer_set.filter(
            question=question)
        if text_answers.exists():
            answered = True
            submitted_text = text_answers.get().text_answer
            accepted = question.quizanswer_set.filter(is_correct=True)
            correct = any(
                _match_text_input(question, submitted_text, answer.answer,
                                  problem_instance)
                for answer in accepted)
    else:
        chosen = submission.quizsubmissionanswer_set.filter(
            answer__question=question)
        if chosen.exists():
            answered = True
            correct = all(_is_answer_correct(answer) for answer in chosen)

    ignore_question = not answered
    if answered:
        if correct:
            report.score = IntegerScore(max_points)
            report.status = 'OK'
        report.save()
    return (max_points, ignore_question) if correct else (0, ignore_question)
def _create_score_report(max_score, score, submission_report):
    """Persist an OK ScoreReport row for ``submission_report`` with the
    given integer score and maximal score."""
    report_fields = {
        'submission_report': submission_report,
        'score': IntegerScore(score),
        'status': 'OK',
        'max_score': IntegerScore(max_score),
    }
    ScoreReport.objects.create(**report_fields)
def _score_question(submission, submission_report, question):
    """Grade one quiz question and always persist its QuestionReport.

    Returns the number of points awarded (``question.points`` or 0).
    """
    max_points = question.points
    report = QuestionReport(
        submission_report=submission_report,
        question=question,
        question_max_score=max_points,
        score=IntegerScore(0),
    )

    if question.is_text_input:
        given_text = submission.quizsubmissiontextanswer_set \
            .get(question=question).text_answer
        accepted = question.quizanswer_set.filter(is_correct=True)
        # Any accepted spelling of the answer earns the points.
        earned = any(_match_text_input(question, given_text, answer.answer)
                     for answer in accepted)
    else:
        picked = submission.quizsubmissionanswer_set \
            .filter(answer__question=question)
        # All picked choices must be correct ones.
        earned = all(_is_answer_correct(answer) for answer in picked)

    if earned:
        report.score = IntegerScore(max_points)
        report.status = 'OK'
    report.save()
    return max_points if earned else 0
def test_pa_score(self):
    """PAScore ordering, tie-breaking by distribution, addition, repr
    and serialization round-trip."""
    scores = [PAScore(IntegerScore(v)) for v in range(11)]
    self.assertLess(scores[0], scores[5])
    self.assertLess(scores[5], scores[10])
    self.assertLess(scores[5] + scores[5], scores[10])
    self.assertLess(scores[5] + scores[5],
                    scores[2] + scores[2] + scores[6])
    self.assertLess(scores[10], scores[2] + scores[4] + scores[5])
    self.assertLess(scores[2] + scores[2] + scores[6],
                    scores[1] + scores[3] + scores[6])

    # Equal point totals: the distribution breaks the tie.
    dist_a = ScoreDistribution([0] * 8 + [2, 4])
    dist_b = ScoreDistribution([0] * 8 + [1, 6])
    pa_a = PAScore(IntegerScore(8), dist_a)
    pa_b = PAScore(IntegerScore(8), dist_b)
    self.assertLess(pa_b, pa_a)

    total = (scores[10] + scores[10] + scores[10] + scores[4]
             + scores[2] + pa_a + pa_b)
    self.assertEqual(total, (3 * 10 + 4 + 2 + 2 * 8))
    self.assertEqual(
        repr(total),
        'PAScore(IntegerScore(52), ScoreDistribution(10: 3, 9: 0, 8: '
        '0, 7: 0, 6: 0, 5: 0, 4: 1, 3: 0, 2: 4, 1: 10))')
    self.assertEqual(
        total._to_repr(),
        '0000000000000000052;00003:00000:'
        '00000:00000:00000:00000:00001:00000:00004:00010')
    self.assertEqual(total, PAScore._from_repr(total._to_repr()))
class TestPAScorer(TestCase):
    """Tests for ``pa_test_scorer``: in PA scoring every test is worth at
    most one point (0 when its declared max_score is 0)."""

    t_results_ok = (
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 0}),
        ({'exec_time_limit': 100, 'max_score': 10},
         {'result_code': 'OK', 'time_used': 99}),
        ({'exec_time_limit': 1000, 'max_score': 0},
         {'result_code': 'OK', 'time_used': 123}),
    )

    t_expected_ok = [
        (IntegerScore(1), IntegerScore(1), 'OK'),
        (IntegerScore(1), IntegerScore(1), 'OK'),
        (IntegerScore(0), IntegerScore(0), 'OK'),
    ]

    t_results_wrong = [
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'WA', 'time_used': 75}),
        ({'exec_time_limit': 100, 'max_score': 0},
         {'result_code': 'RV', 'time_used': 75}),
    ]

    t_expected_wrong = [
        (IntegerScore(0), IntegerScore(1), 'WA'),
        (IntegerScore(0), IntegerScore(0), 'RV'),
    ]

    def test_pa_test_scorer(self):
        # ``list(...)`` is required on Python 3, where ``map`` returns a
        # lazy iterator — comparing it to a list would always fail.
        # ``assertEqual`` replaces the deprecated ``assertEquals`` alias.
        results = list(map(utils.pa_test_scorer, *zip(*self.t_results_ok)))
        self.assertEqual(self.t_expected_ok, results)
        results = list(map(utils.pa_test_scorer,
                           *zip(*self.t_results_wrong)))
        self.assertEqual(self.t_expected_wrong, results)
class PAScore(ScoreValue):
    """PA style score.

    It consists of a number of points scored, together with their
    distribution. When two users get the same number of points, then the
    number of tasks for which they got 10pts (maximal score) is taken into
    consideration. If this still does not break the tie, number of 9 point
    scores is considered, then 8 point scores etc.
    """

    # Serialization tag for this score type.
    symbol = 'PA'

    def __init__(self, points=None, distribution=None):
        # NOTE(review): relies on truthiness of the arguments — a falsy
        # ``points`` (None, and possibly a zero-valued IntegerScore if that
        # is falsy; confirm) takes the default branch below.
        if points:
            assert isinstance(points, IntegerScore)
            self.points = points
        else:
            self.points = IntegerScore(0)
        if distribution:
            assert isinstance(distribution, ScoreDistribution)
            self.distribution = distribution
        else:
            # No explicit distribution given: derive one from the points.
            self.distribution = ScoreDistribution()
            self.distribution.update(self.points.value)

    def __add__(self, other):
        # Points and distributions add component-wise.
        return PAScore(self.points + other.points,
                       self.distribution + other.distribution)

    def __eq__(self, other):
        # Against a non-PAScore, compare by points only; otherwise compare
        # the (points, distribution) pair lexicographically.
        if not isinstance(other, PAScore):
            return self.points == other
        return (self.points, self.distribution) == \
            (other.points, other.distribution)

    def __lt__(self, other):
        # Points decide first; the distribution is the tie-breaker.
        if not isinstance(other, PAScore):
            return self.points < other
        return (self.points, self.distribution) < \
            (other.points, other.distribution)

    def __unicode__(self):
        # Human-readable form shows only the numeric part.
        return six.text_type(self.points)

    def __repr__(self):
        return "PAScore(%r, %r)" % (self.points, self.distribution)

    @classmethod
    def _from_repr(cls, value):
        # Serialized form is '<points>;<distribution>' (see _to_repr).
        points, distribution = value.split(';')
        return cls(points=IntegerScore._from_repr(points),
                   distribution=ScoreDistribution._from_repr(distribution))

    def _to_repr(self):
        return '%s;%s' % (self.points._to_repr(),
                          self.distribution._to_repr())

    def to_int(self):
        return self.points.to_int()
def test_integer_score(self):
    """IntegerScore ordering, addition, text form and repr round-trip."""
    one = IntegerScore(1)
    two = IntegerScore(2)
    self.assertLess(one, two)
    self.assertGreater(two, one)
    self.assertEqual(one, IntegerScore(1))
    self.assertEqual((one + two).value, 3)
    self.assertEqual(unicode(one), '1')
    self.assertEqual(IntegerScore._from_repr(one._to_repr()), one)
def __init__(self, points=None, distribution=None):
    """Build a PA score from optional points and distribution.

    With no points, a zero score is used; with no distribution, one is
    derived from the points value itself.
    """
    if not points:
        self.points = IntegerScore(0)
    else:
        assert isinstance(points, IntegerScore)
        self.points = points
    if not distribution:
        self.distribution = ScoreDistribution()
        self.distribution.update(self.points.value)
    else:
        assert isinstance(distribution, ScoreDistribution)
        self.distribution = distribution
def smart_score_copier(apps, schema_editor):
    """Data-migration helper: populate ``TestReport.max_score`` from the
    plain-integer ``test_max_score`` column, per contest type.

    Works on the raw serialized score representation directly in SQL.
    """
    Contest = apps.get_model('contests', 'Contest')
    ProblemInstance = apps.get_model('contests', 'ProblemInstance')
    Submission = apps.get_model('contests', 'Submission')
    SubmissionReport = apps.get_model('contests', 'SubmissionReport')
    TestReport = apps.get_model('programs', 'TestReport')
    db_alias = schema_editor.connection.alias
    # Firstly, all max_scores will be set as equal to test_max_scores
    # provided that they are not None – this is the behaviour used in
    # all contests except for the Algorithmic Engagements
    # and the ACM type contests.
    # This operates on raw, serialized data, which is a bit dirty but works.
    # The expression builds 'int:<19-digit zero-padded value>' in SQL:
    # left-pad with zeros, then keep the rightmost 19 characters.
    TestReport.objects.using(db_alias).filter(test_max_score__isnull=False) \
        .update(
            max_score=Concat(
                V('int:'),
                Substr(
                    Concat(V('0000000000000000000'), 'test_max_score'),
                    Length(Concat(V('0000000000000000000'),
                                  'test_max_score')) - 18,
                    19
                )
            )
        )
    # Secondly, all max_scores related to the Algorithmic Engagements
    # will be set to either 1 or 0, the same way they are defined
    # in pa_test_scorer from oioioi/pa/utils.py
    pa_test_reports = TestReport.objects.using(db_alias).filter(
        submission_report__submission__problem_instance__contest__controller_name
        ='oioioi.pa.controllers.PAContestController',
        test_max_score__isnull=False)
    pa_test_reports.update(max_score=IntegerScore(1))
    pa_test_reports.filter(test_max_score=0).update(max_score=IntegerScore(0))
    # In the end, all max_scores related to the ACM type contests will be left
    # as none, which agrees with their behaviour defined in the ACM contest
    # controller.
    acm_test_reports = TestReport.objects.using(db_alias).filter(
        submission_report__submission__problem_instance__contest__controller_name
        ='oioioi.acm.controllers.ACMContestController')
    acm_test_reports.update(max_score=None)
def _check_interactive_package(self, problem):
    """Assert that an imported interactive package ('arc') produced the
    expected tests, checker, extra files, model solutions, and that every
    resulting submission was judged to the full score.
    """
    self.assertEqual(problem.short_name, 'arc')

    # Extra compilation settings parsed from the package config.
    config = ExtraConfig.objects.get(problem=problem)
    assert len(config.parsed_config['extra_compilation_args']) == 2
    assert len(config.parsed_config['extra_compilation_files']) == 3

    self.assertEqual(problem.name, u'arc')

    tests = Test.objects.filter(problem_instance=problem.main_problem_instance)

    # Example test '0': fixed content, default time limit, 66000 KB memory.
    t0 = tests.get(name='0')
    self.assertEqual(t0.input_file.read(), b'3\n12\n5\n8\n3\n15\n8\n0\n')
    self.assertEqual(t0.output_file.read(), b'12\n15\n8\n')
    self.assertEqual(t0.kind, 'EXAMPLE')
    self.assertEqual(t0.group, '0')
    self.assertEqual(t0.max_score, 0)
    self.assertEqual(t0.time_limit, DEFAULT_TIME_LIMIT)
    self.assertEqual(t0.memory_limit, 66000)

    t1a = tests.get(name='1a')
    self.assertEqual(
        t1a.input_file.read(), b'0\n-435634223 1 30 23 130 0 -324556462\n'
    )
    # The triple-quoted bytes literal mixes '\n' escapes with one physical
    # newline (after '96'); both are newline bytes in the expected output.
    self.assertEqual(
        t1a.output_file.read(),
        b"""126\n126\n82\n85\n80\n64\n84\n5\n128\n66\n4\n79\n64\n96
22\n107\n84\n112\n92\n63\n125\n82\n1\n""",
    )
    self.assertEqual(t1a.kind, 'NORMAL')
    self.assertEqual(t1a.group, '1')
    self.assertEqual(t1a.max_score, 50)

    t2a = tests.get(name='2a')
    self.assertEqual(
        t2a.input_file.read(), b'0\n-435634223 1 14045 547 60000 0 -324556462\n'
    )
    self.assertEqual(t2a.kind, 'NORMAL')
    self.assertEqual(t2a.group, '2')
    self.assertEqual(t2a.max_score, 50)

    # The interactive output checker binary must have been imported.
    checker = OutputChecker.objects.get(problem=problem)
    self.assertIsNotNone(checker.exe_file)

    extra_files = ExtraFile.objects.filter(problem=problem)
    self.assertEqual(extra_files.count(), 3)

    # Model solutions in order_key order: C, C++, Pascal.
    model_solutions = ModelSolution.objects.filter(problem=problem).order_by(
        'order_key'
    )
    solc = model_solutions.get(name='arc.c')
    self.assertEqual(solc.kind, 'NORMAL')
    solcpp = model_solutions.get(name='arc1.cpp')
    self.assertEqual(solcpp.kind, 'NORMAL')
    solpas = model_solutions.get(name='arc2.pas')
    self.assertEqual(solpas.kind, 'NORMAL')
    self.assertEqual(list(model_solutions), [solc, solcpp, solpas])

    # Every submission created from the model solutions scored 100 points.
    submissions = Submission.objects.all()
    for s in submissions:
        self.assertEqual(s.status, 'INI_OK')
        self.assertEqual(s.score, IntegerScore(100))
def test_score_field(self):
    """ScoreField on UserResultForContest: store, reload, deserialize on
    assignment, validation of malformed values, and None handling."""
    contest = Contest.objects.get()
    user = User.objects.get(username='******')

    result = UserResultForContest(user=user, contest=contest,
                                  score=IntegerScore(42))
    result.save()
    del result

    result = UserResultForContest.objects.get()
    self.assertTrue(isinstance(result.score, IntegerScore))
    self.assertEqual(result.score.value, 42)

    # Assigning a serialized string deserializes it on the fly.
    result.score = "int:12"
    self.assertEqual(result.score.value, 12)

    # Malformed serializations are rejected.
    with self.assertRaises(ValidationError):
        result.score = "1"
    with self.assertRaises(ValidationError):
        result.score = "foo:1"

    result.score = None
    result.save()
    del result
    result = UserResultForContest.objects.get()
    self.assertIsNone(result.score)
def grade_tests(env, **kwargs):
    """Grades tests using a scoring function.

    The ``env['test_scorer']``, which is used by this ``Handler``,
    should be a path to a function which gets test definition (e.g. a
    ``env['tests'][test_name]`` dict) and test run result (e.g. a
    ``env['test_results'][test_name]`` dict) and returns a score
    (instance of some subclass of
    :class:`~oioioi.contests.scores.ScoreValue`) and a status.

    Used ``environ`` keys:
        * ``tests``
        * ``test_results``
        * ``test_scorer``

    Produced ``environ`` keys:
        * `score`, `max_score` and `status` keys in ``env['test_result']``
    """
    fun = get_object_by_dotted_name(env.get('test_scorer')
                                    or DEFAULT_TEST_SCORER)
    tests = env['tests']
    # ``dict.items()`` instead of the Python 2-only ``iteritems``, and
    # ``type(None)`` instead of ``types.NoneType`` (removed in Python 3);
    # both forms also work on Python 2.
    for test_name, test_result in env['test_results'].items():
        if tests[test_name]['to_judge']:
            score, max_score, status = fun(tests[test_name], test_result)
            assert isinstance(score, (type(None), ScoreValue))
            assert isinstance(max_score, (type(None), ScoreValue))
            test_result['score'] = score and score.serialize()
            test_result['max_score'] = max_score and max_score.serialize()
            test_result['status'] = status
        else:
            # Not re-judged: copy the figures from the currently ACTIVE
            # report of this submission.
            report = TestReport.objects.get(
                submission_report__submission__id=env['submission_id'],
                submission_report__status='ACTIVE',
                test_name=test_name)
            score = report.score
            max_score = IntegerScore(report.test_max_score)
            status = report.status
            time_used = report.time_used
            test_result['score'] = score and score.serialize()
            test_result['max_score'] = max_score and max_score.serialize()
            test_result['status'] = status
            test_result['time_used'] = time_used
        env['test_results'][test_name] = test_result
    return env
def grade_tests(env, **kwargs):
    """Grade every test run with the configured scoring function.

    ``env['test_scorer']`` (falling back to
    ``settings.DEFAULT_TEST_SCORER``) is a dotted path to a function that
    takes a test definition dict and a test-result dict and returns
    ``(score, max_score, status)``, where the scores are ``ScoreValue``
    instances or ``None``.

    Used ``environ`` keys: ``tests``, ``test_results``, ``test_scorer``.

    Produced ``environ`` keys: ``score``, ``max_score`` and ``status``
    in every ``env['test_results']`` entry.
    """
    scorer = import_string(env.get('test_scorer')
                           or settings.DEFAULT_TEST_SCORER)
    tests = env['tests']
    for name, outcome in six.iteritems(env['test_results']):
        if not tests[name]['to_judge']:
            # Not re-judged: reuse the figures stored on the currently
            # ACTIVE report of this submission.
            previous = TestReport.objects.get(
                submission_report__submission__id=env['submission_id'],
                submission_report__status='ACTIVE',
                test_name=name)
            score = previous.score
            max_score = IntegerScore(previous.test_max_score)
            outcome['status'] = previous.status
            outcome['time_used'] = previous.time_used
        else:
            score, max_score, status = scorer(tests[name], outcome)
            assert isinstance(score, (type(None), ScoreValue))
            assert isinstance(max_score, (type(None), ScoreValue))
            outcome['status'] = status
        outcome['score'] = score and score.serialize()
        outcome['max_score'] = max_score and max_score.serialize()
        env['test_results'][name] = outcome
    return env
def create_score(username, score):
    """Create a user plus an OK UserResultForProblem worth ``score`` points.

    ``pis`` (the problem instance) comes from the enclosing scope.
    Returns the newly created user.
    """
    email = username + '@example.pl'
    user = User.objects.create_user(username, email, username)
    result = UserResultForProblem()
    result.user = user
    result.problem_instance = pis
    result.status = 'OK'
    result.score = IntegerScore(score)
    result.save()
    return user
def _score_question(submission, submission_report, question):
    """Grade one choice-based quiz question, persist its QuestionReport,
    and return the points awarded (``question.points`` or 0)."""
    max_points = question.points
    report = QuestionReport(
        submission_report=submission_report,
        question=question,
        question_max_score=max_points,
        score=IntegerScore(0),
    )
    chosen = submission.quizsubmissionanswer_set \
        .filter(answer__question=question)
    # Points are awarded only when every picked choice is correct.
    fully_correct = all(_is_answer_correct(answer) for answer in chosen)
    if fully_correct:
        report.score = IntegerScore(max_points)
        report.status = 'OK'
    report.save()
    return max_points if fully_correct else 0
def test_score_field(self):
    """ScoreField round-trip on the plain test model, including
    deserialization on assignment and validation of bad input."""
    obj = ScoreFieldTestModel(score=IntegerScore(42))
    obj.save()
    del obj

    obj = ScoreFieldTestModel.objects.get()
    self.assertTrue(isinstance(obj.score, IntegerScore))
    self.assertEqual(obj.score.value, 42)

    # Serialized strings are deserialized when assigned.
    obj.score = "int:12"
    self.assertEqual(obj.score.value, 12)

    # Malformed serializations must be rejected.
    with self.assertRaises(ValidationError):
        obj.score = "1"
    with self.assertRaises(ValidationError):
        obj.score = "foo:1"

    obj.score = None
    obj.save()
    del obj
    obj = ScoreFieldTestModel.objects.get()
    self.assertIsNone(obj.score)
def discrete_test_scorer(test, result):
    """All-or-nothing test scorer.

    Returns ``(score, status)``: the full ``test['max_score']`` for an OK
    run, zero otherwise.
    """
    status = result['result_code']
    # Conditional expression instead of the fragile ``and``/``or`` chain:
    # ``cond and a or b`` silently yields ``b`` whenever ``a`` is falsy
    # (here the result happens to coincide for max_score == 0, but the
    # idiom is a bug magnet and is discouraged).
    score = test['max_score'] if status == 'OK' else 0
    return IntegerScore(score), status
def _from_repr(cls, value):
    """Inverse of ``_to_repr``: rebuild the score from its
    ';'-separated serialized form."""
    points_repr, distribution_repr = value.split(';')
    points = IntegerScore._from_repr(points_repr)
    distribution = ScoreDistribution._from_repr(distribution_repr)
    return cls(points=points, distribution=distribution)
def pa_score_factory(int_score):
    """Wrap a plain integer into a PA-style score value."""
    return PAScore(points=IntegerScore(int_score))
def _from_repr(cls, value):
    """Deserialize ``'<points>;<distribution>'`` back into a score."""
    raw_points, raw_distribution = value.split(';')
    parsed_points = IntegerScore._from_repr(raw_points)
    parsed_distribution = ScoreDistribution._from_repr(raw_distribution)
    return cls(points=parsed_points, distribution=parsed_distribution)
class TestScorers(TestCase):
    """Tests for the per-test scorers, group scorers and score aggregators.

    The ``list(map(...))`` / ``list(zip(...))`` wrappers are required on
    Python 3, where these builtins return lazy iterators; comparing a
    lazy iterator against a list always fails.  ``enumerate`` replaces
    the Python 2-only ``xrange``-based key generation, and
    ``dict.values()`` is materialized because Python 3 views do not
    support ``+=``.
    """

    t_results_ok = (
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 0}),
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 50}),
        ({'exec_time_limit': 1000, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 501}),
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 75}),
        ({'exec_time_limit': 1000, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 999}),
        ({'max_score': 100},
         {'result_code': 'OK', 'time_used': 0}),
        ({'max_score': 100},
         {'result_code': 'OK', 'time_used': 99999}),
    )

    t_results_ok_perc = (
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 0, 'result_percentage': 99}),
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 75, 'result_percentage': 50}),
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'OK', 'time_used': 75, 'result_percentage': 0}),
    )

    t_results_wrong = [
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'WA', 'time_used': 75}),
        ({'exec_time_limit': 100, 'max_score': 100},
         {'result_code': 'RV', 'time_used': 75}),
    ]

    t_expected_wrong = [
        (IntegerScore(0), 'WA'),
        (IntegerScore(0), 'RV'),
    ]

    def test_discrete_test_scorer(self):
        exp_scores = [100] * len(self.t_results_ok)
        exp_statuses = ['OK'] * len(self.t_results_ok)
        expected = list(zip(exp_scores, exp_statuses))
        results = list(map(utils.discrete_test_scorer,
                           *zip(*self.t_results_ok)))
        self.assertEqual(expected, results)
        results = list(map(utils.discrete_test_scorer,
                           *zip(*self.t_results_wrong)))
        self.assertEqual(self.t_expected_wrong, results)

    def test_threshold_linear_test_scorer(self):
        exp_scores = [100, 100, 99, 50, 0, 100, 100]
        exp_statuses = ['OK'] * len(self.t_results_ok)
        expected = list(zip(exp_scores, exp_statuses))
        results = list(map(utils.threshold_linear_test_scorer,
                           *zip(*self.t_results_ok)))
        self.assertEqual(expected, results)

        exp_scores = [99, 25, 0]
        exp_statuses = ['OK'] * len(self.t_results_ok_perc)
        expected = list(zip(exp_scores, exp_statuses))
        results = list(map(utils.threshold_linear_test_scorer,
                           *zip(*self.t_results_ok_perc)))
        self.assertEqual(expected, results)

        malformed = ({'exec_time_limit': 100, 'max_score': 100},
                     {'result_code': 'OK', 'time_used': 101})
        self.assertEqual(utils.threshold_linear_test_scorer(*malformed),
                         (0, 'TLE'))
        results = list(map(utils.threshold_linear_test_scorer,
                           *zip(*self.t_results_wrong)))
        self.assertEqual(self.t_expected_wrong, results)

    @memoized_property
    def g_results_ok(self):
        # Tested elsewhere
        results = list(map(utils.threshold_linear_test_scorer,
                           *zip(*self.t_results_ok[:4])))
        dicts = [dict(score=sc.serialize(), status=st)
                 for sc, st in results]
        return dict(enumerate(dicts))

    @memoized_property
    def g_results_wrong(self):
        results = list(map(utils.threshold_linear_test_scorer,
                           *zip(*self.t_results_wrong)))
        dicts = list(self.g_results_ok.values())
        dicts += [dict(score=sc.serialize(), status=st)
                  for sc, st in results]
        return dict(enumerate(dicts))

    def test_min_group_scorer(self):
        self.assertEqual((50, 'OK'),
                         utils.min_group_scorer(self.g_results_ok))
        self.assertEqual((0, 'WA'),
                         utils.min_group_scorer(self.g_results_wrong))

    def test_sum_group_scorer(self):
        self.assertEqual((349, 'OK'),
                         utils.sum_group_scorer(self.g_results_ok))
        self.assertEqual((349, 'WA'),
                         utils.sum_group_scorer(self.g_results_wrong))

    def test_sum_score_aggregator(self):
        self.assertEqual((349, 'OK'),
                         utils.sum_score_aggregator(self.g_results_ok))
        self.assertEqual((349, 'WA'),
                         utils.sum_score_aggregator(self.g_results_wrong))
def pa_test_scorer(test, result):
    """PA-style per-test scorer: a test is worth at most one point.

    Returns ``(score, max_score, status)`` with the max score capped at 1.
    """
    status = result['result_code']
    capped_max = min(1, test['max_score'])
    if status == 'OK':
        awarded = capped_max
    else:
        awarded = 0
    return IntegerScore(awarded), IntegerScore(capped_max), status
def discrete_test_scorer(test, result):
    """All-or-nothing scorer: full ``max_score`` for an OK run, else 0.

    Returns ``(score, max_score, status)``.
    """
    status = result['result_code']
    top = test['max_score']
    if status == 'OK':
        return IntegerScore(top), IntegerScore(top), status
    return IntegerScore(0), IntegerScore(top), status