def fill_evaluation_environ(self, environ, submission, **kwargs):
    """Populate ``environ`` with everything needed to judge *submission*.

    Builds the base environment, splices the grading recipe in after the
    compilation phase, installs default group scorer / score aggregator,
    attaches the problem's output checker (when one is uploaded) and, for
    INITIAL reports, registers the report/score update handlers.
    """
    self.generate_base_environ(environ, submission, **kwargs)

    # A USER_OUTS submission only produces user-visible outputs; narrow
    # the report kinds and make sure outputs are kept.
    if 'USER_OUTS' in environ['submission_kind']:
        environ['report_kinds'] = ['USER_OUTS']
        environ['save_outputs'] = True

    extend_after_placeholder(environ, 'after_compile',
                             self.generate_recipe(environ['report_kinds']))

    environ.setdefault('group_scorer',
                       'oioioi.programs.utils.min_group_scorer')
    environ.setdefault('score_aggregator',
                       'oioioi.programs.utils.sum_score_aggregator')

    checker_file = OutputChecker.objects.get(problem=self.problem).exe_file
    if checker_file:
        environ['checker'] = django_to_filetracker_path(checker_file)

    if 'INITIAL' in environ['report_kinds']:
        # Refresh report statuses and the submission score right after
        # the initial (example) tests finish.
        for entry in (
                ('update_report_statuses',
                 'oioioi.contests.handlers.update_report_statuses'),
                ('update_submission_score',
                 'oioioi.contests.handlers.update_submission_score')):
            add_before_placeholder(environ, 'after_initial_tests', entry)
def fill_evaluation_environ(self, environ, submission, **kwargs):
    """Fill ``environ`` for evaluating *submission*.

    Sequence: base environment, then the recipe appended after the
    'after_compile' placeholder, default scorers, the optional output
    checker, and — when INITIAL reports are produced — handlers that
    update report statuses and the submission score after the initial
    tests.
    """
    self.generate_base_environ(environ, submission, **kwargs)

    # User-outs submissions are restricted to USER_OUTS reports and must
    # retain their outputs for later viewing.
    kind = environ['submission_kind']
    if 'USER_OUTS' in kind:
        environ['report_kinds'] = ['USER_OUTS']
        environ['save_outputs'] = True

    recipe = self.generate_recipe(environ['report_kinds'])
    extend_after_placeholder(environ, 'after_compile', recipe)

    if 'group_scorer' not in environ:
        environ['group_scorer'] = 'oioioi.programs.utils.min_group_scorer'
    if 'score_aggregator' not in environ:
        environ['score_aggregator'] = \
            'oioioi.programs.utils.sum_score_aggregator'

    exe = OutputChecker.objects.get(problem=self.problem).exe_file
    if exe:
        environ['checker'] = django_to_filetracker_path(exe)

    if 'INITIAL' in environ['report_kinds']:
        add_before_placeholder(
            environ, 'after_initial_tests',
            ('update_report_statuses',
             'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(
            environ, 'after_initial_tests',
            ('update_submission_score',
             'oioioi.contests.handlers.update_submission_score'))
def fill_evaluation_environ(self, environ, submission, **kwargs):
    """Configure ``environ`` for a test-run submission.

    Non-TESTRUN submissions are delegated unchanged to the next
    controller in the MRO. For test runs, a dedicated recipe (test
    creation, execution, grading, reporting) is spliced in after
    compilation; outputs are saved but not checked against expected
    answers, and only TESTRUN reports are produced.
    """
    self.generate_base_environ(environ, submission, **kwargs)

    if environ['submission_kind'] != 'TESTRUN':
        # Regular submission: fall through to the standard pipeline.
        return super(TestRunProblemControllerMixin, self) \
            .fill_evaluation_environ(environ, submission, **kwargs)

    testrun_recipe = [
        ('make_test', 'oioioi.testrun.handlers.make_test'),
        ('run_tests', 'oioioi.programs.handlers.run_tests',),
        ('run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
        ('grade_submission', 'oioioi.testrun.handlers.grade_submission'),
        ('make_report', 'oioioi.testrun.handlers.make_report'),
    ]
    extend_after_placeholder(environ, 'after_compile', testrun_recipe)

    # Clean up the saved output if evaluation fails part-way through.
    environ['error_handlers'].append(
        ('delete_output', 'oioioi.testrun.handlers.delete_output'))

    environ['save_outputs'] = True
    environ['check_outputs'] = False
    environ['report_kinds'] = ['TESTRUN']
def fill_evaluation_environ(self, environ, **kwargs):
    """Prepare ``environ`` for evaluation.

    Generates the base environment, appends the grading recipe after
    compilation, supplies default group scorer / score aggregator, and
    attaches the problem's output checker when one is present.
    """
    self.generate_base_environ(environ, **kwargs)

    extend_after_placeholder(environ, 'after_compile',
                             self.generate_recipe(environ['report_kinds']))

    # Defaults are only applied when the caller did not configure them.
    environ.setdefault('group_scorer',
                       'oioioi.programs.utils.min_group_scorer')
    environ.setdefault('score_aggregator',
                       'oioioi.programs.utils.sum_score_aggregator')

    checker_exe = OutputChecker.objects.get(problem=self.problem).exe_file
    if checker_exe:
        environ['checker'] = django_to_filetracker_path(checker_exe)
def fill_evaluation_environ(self, environ, **kwargs):
    """Configure ``environ`` for a test-run submission.

    Anything that is not a TESTRUN submission is handed to the next
    controller in the MRO untouched. Test runs get their own recipe
    after compilation, keep their outputs, and skip output checking.
    """
    if environ['submission_kind'] != 'TESTRUN':
        # Not a test run — delegate to the regular evaluation setup.
        return super(TestRunProblemControllerMixin, self) \
            .fill_evaluation_environ(environ, **kwargs)

    self.generate_base_environ(environ, **kwargs)

    testrun_recipe = [
        ('make_test', 'oioioi.testrun.handlers.make_test'),
        (
            'run_tests',
            'oioioi.programs.handlers.run_tests',
        ),
        ('grade_submission', 'oioioi.testrun.handlers.grade_submission'),
        ('make_report', 'oioioi.testrun.handlers.make_report'),
    ]
    extend_after_placeholder(environ, 'after_compile', testrun_recipe)

    # Remove any saved output if evaluation aborts with an error.
    environ['error_handlers'].append(
        ('delete_output', 'oioioi.testrun.handlers.delete_output'))

    environ['save_outputs'] = True
    environ['check_outputs'] = False