Example #1
    def test_handler_presence(self):
        contest = Contest.objects.get()
        submission = Submission.objects.get()
        controller = ProgrammingContestController(contest)

        env = create_environ()
        env.setdefault('recipe', []).append(('dummy', 'dummy'))
        env['extra_args'] = []
        controller.fill_evaluation_environ(env, submission)
        controller.finalize_evaluation_environment(env)

        self.assertIn(
            (
                'check_problem_instance_state',
                'oioioi.suspendjudge.handlers.check_problem_instance_state',
                dict(suspend_init_tests=True),
            ),
            env['recipe'],
        )
        self.assertIn(
            (
                'check_problem_instance_state',
                'oioioi.suspendjudge.handlers.check_problem_instance_state',
            ),
            env['recipe'],
        )
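Each recipe entry asserted above is a tuple of a step name, the dotted path of a handler, and an optional dict of keyword arguments. A minimal sketch of a handler with that calling convention is shown below; the body is an assumption, the real oioioi.suspendjudge.handlers.check_problem_instance_state does more than this.

def check_problem_instance_state(env, suspend_init_tests=False, **kwargs):
    # Sketch of the calling convention only: a handler receives the
    # evaluation environ plus the keyword arguments from its recipe entry
    # (here mirroring dict(suspend_init_tests=True) above) and returns the
    # environ for the next step. The real suspendjudge handler additionally
    # checks whether judging is suspended for the problem instance.
    return env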
Example #2
    def _prepare(self):
        SavedEnviron.objects.all().delete()
        TestAsyncJobs.transferred_environs = []
        env = create_environ()
        env.setdefault('recipe', []).append(
            ('transfer', 'oioioi.evalmgr.tests.tests._call_transfer'))
        env['resumed'] = False
        return env
Example #3
    def test_add_handlers(self):
        """Test if the proper handlers are added to the recipe."""
        contest = Contest.objects.get()
        controller = AddHandlersController(contest)
        env = create_environ()
        env.setdefault('recipe', []).append(('dummy', 'dummy'))
        controller.finalize_evaluation_environment(env)

        self.assertIn(
            ('remove_queuedjob_on_error',
             'oioioi.evalmgr.handlers.remove_queuedjob_on_error'),
            env['error_handlers'],
        )
Example #4
    def judge(self, submission, extra_args=None, is_rejudge=False):
        environ = create_environ()
        environ['extra_args'] = extra_args or {}
        environ['is_rejudge'] = is_rejudge
        picontroller = submission.problem_instance.controller

        picontroller.fill_evaluation_environ(environ, submission)

        # Handlers appended both to the main recipe and to the error path.
        extra_steps = [
            ('update_report_statuses',
             'oioioi.contests.handlers.update_report_statuses'),
            ('update_submission_score',
             'oioioi.contests.handlers.update_submission_score'),
            ('update_user_results',
             'oioioi.contests.handlers.update_user_results'),
        ] + ([
            ('update_problem_statistics',
             'oioioi.contests.handlers.update_problem_statistics'),
        ] if settings.PROBLEM_STATISTICS_AVAILABLE else []) + [
            ('call_submission_judged',
             'oioioi.contests.handlers.call_submission_judged'),
            ('dump_final_env', 'oioioi.evalmgr.handlers.dump_env',
             dict(message='Finished evaluation')),
        ]

        # Error handlers run when a recipe step fails.
        environ.setdefault('error_handlers', [])
        environ['error_handlers'].append(
            ('create_error_report',
             'oioioi.contests.handlers.create_error_report'))

        if settings.MAIL_ADMINS_ON_GRADING_ERROR:
            environ['error_handlers'].append(
                ('mail_admins_on_error',
                 'oioioi.contests.handlers.mail_admins_on_error'))

        environ['error_handlers'].extend(extra_steps)
        environ['error_handlers'].append(
            ('error_handled', 'oioioi.evalmgr.handlers.error_handled'))

        environ['recipe'].extend(extra_steps)

        picontroller.finalize_evaluation_environment(environ)

        # Run first: wait until the submission row is visible in the database.
        environ['recipe'].insert(
            0, ('wait_for_submission_in_db',
                'oioioi.contests.handlers.wait_for_submission_in_db'))

        evalmgr_extra_args = environ.get('evalmgr_extra_args', {})
        logger.debug("Judging submission #%d with environ:\n %s",
                     submission.id, pprint.pformat(environ, indent=4))
        delay_environ(environ, **evalmgr_extra_args)
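The judge() method above only assembles the recipe; execution is handed off to evalmgr via delay_environ. Purely as an illustration of the convention the recipe relies on, a sequential runner could look like the sketch below. This is not the evalmgr implementation, which runs steps asynchronously and routes failures through error_handlers.

import importlib


def _resolve(dotted_path):
    # Split 'package.module.function' into the module path and attribute name.
    module_name, attr_name = dotted_path.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), attr_name)


def run_recipe_sequentially(environ):
    # Sketch only: walk the recipe in order, calling each handler with the
    # environ plus the optional kwargs dict from its entry; every handler
    # returns the (possibly updated) environ for the next step.
    for entry in list(environ.get('recipe', [])):
        _name, dotted_path = entry[0], entry[1]
        kwargs = entry[2] if len(entry) > 2 else {}
        environ = _resolve(dotted_path)(environ, **kwargs)
    return environ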
Example #5
    def test_handlers(self):
        submission = TestRunProgramSubmission.objects.get(pk=1)

        environ = create_environ()
        environ['job_id'] = 'job_id'
        environ['submission_id'] = submission.id
        environ['submission_kind'] = submission.kind
        environ['problem_instance_id'] = submission.problem_instance.id
        environ['problem_id'] = submission.problem.id
        environ['round_id'] = submission.problem_instance.round.id
        environ['contest_id'] = submission.problem_instance.contest_id

        # Simulate successful compilation
        environ['compilation_result'] = 'OK'
        environ['compilation_message'] = ''

        environ = handlers.make_test(environ)

        self.assertIn('tests', environ)
        self.assertIn('test', environ['tests'])
        self.assertIn('in_file', environ['tests']['test'])

        # Simulate running tests
        FiletrackerStorage().save('output', ContentFile('o'))
        try:
            environ['test_results'] = {}
            environ['test_results']['test'] = {
                'result_code': 'OK',
                'result_string': 'OK',
                'time_used': 111,
                'out_file': '/output'
            }

            environ = handlers.grade_submission(environ)

            self.assertIsNone(environ['score'])
            self.assertEqual('OK', environ['status'])

            environ = handlers.make_report(environ)
            self.assertIn('report_id', environ)
            report = TestRunReport.objects.get(
                submission_report=environ['report_id'])
            self.assertEqual(111, report.time_used)
            self.assertEqual('', report.comment)
            self.assertEqual('o', report.output_file.read())

            handlers.delete_output(environ)
        except Exception:
            get_client().delete_file('/output')
            raise
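A side note on the cleanup above: assuming the surrounding class is a standard unittest.TestCase (the self.assert* calls suggest it is), the temporary file could instead be scheduled for removal with addCleanup right after it is saved, keeping the assertions free of cleanup logic. A minimal sketch:

        # Sketch: register removal of the temporary '/output' file as a test
        # cleanup, so it runs whether or not the assertions below fail, and
        # any exception still propagates to the test runner.
        FiletrackerStorage().save('output', ContentFile('o'))
        self.addCleanup(get_client().delete_file, '/output')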
Example #6
    def _test(self, controller_name):
        contest = Contest.objects.get()
        contest.controller_name = controller_name
        contest.save()

        test_env = create_environ()
        test_env['problem_instance_id'] = 1
        test_env['round_id'] = 1
        test_env['contest_id'] = contest.id

        url = reverse('default_ranking', kwargs={'contest_id': contest.id})

        for i in [1, 3, 4]:
            test_env['submission_id'] = i
            update_user_results(test_env)

        self.assertTrue(self.client.login(username='******'))
        response = self.client.get(url)
        self.assertContains(response, 'Test User')
        self.assertNotContains(response, 'Test User 2')
        self.assertContains(response, '34')
Example #7
    def test_cascade_job(self):
        env = create_environ()
        env.update(dict(recipe=hunting, area='forest'))
        env = delay_environ_wrapper(env).get()
        self.assertEqual('Hedgehog hunted.', env['output'])
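The hunting recipe and delay_environ_wrapper used above come from the evalmgr test module and are not shown in these examples. Purely for illustration, a recipe of the same shape would look like the following; the step names and handler paths are made up, while the real fixture defines its own handlers that eventually set env['output'] to 'Hedgehog hunted.'.

# Hypothetical recipe with the same (name, dotted_path) structure as the
# tuples used throughout these examples; the paths below do not exist.
hunting = [
    ('find_hedgehog', 'example.handlers.find_hedgehog'),
    ('hunt_hedgehog', 'example.handlers.hunt_hedgehog'),
]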