def fill_evaluation_environ(self, environ, submission, **kwargs):
    self.generate_base_environ(environ, submission, **kwargs)

    # For test-run style submissions only user outputs are reported,
    # and the outputs themselves are kept.
    if 'USER_OUTS' in environ['submission_kind']:
        environ['report_kinds'] = ['USER_OUTS']
        environ['save_outputs'] = True

    recipe_body = self.generate_recipe(environ['report_kinds'])
    extend_after_placeholder(environ, 'after_compile', recipe_body)

    environ.setdefault('group_scorer',
                       'oioioi.programs.utils.min_group_scorer')
    environ.setdefault('score_aggregator',
                       'oioioi.programs.utils.sum_score_aggregator')

    checker = OutputChecker.objects.get(problem=self.problem).exe_file
    if checker:
        environ['checker'] = django_to_filetracker_path(checker)

    if 'INITIAL' in environ['report_kinds']:
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_report_statuses',
                 'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_submission_score',
                 'oioioi.contests.handlers.update_submission_score'))
def fill_evaluation_environ_post_problem(self, environ, submission):
    """Run after ProblemController.fill_evaluation_environ."""
    if 'INITIAL' in environ['report_kinds']:
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_report_statuses',
                 'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_submission_score',
                 'oioioi.contests.handlers.update_submission_score'))
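# A minimal sketch of how the recipe helpers used above might work,
# assuming environ['recipe'] is a list of (name, handler_path[, kwargs])
# tuples and that a placeholder is just a recipe entry bearing the
# placeholder's name. The real helpers live in oioioi.evalmgr; this is
# an illustration of the contract, not the actual implementation.
def _placeholder_index(environ, name):
    # [...][0] raises IndexError when the placeholder is absent, which
    # matches the behaviour callers guard against with try/except.
    return [i for i, entry in enumerate(environ['recipe'])
            if entry[0] == name][0]

def add_before_placeholder(environ, name, entry):
    environ['recipe'].insert(_placeholder_index(environ, name), entry)

def extend_after_placeholder(environ, name, entries):
    pos = _placeholder_index(environ, name) + 1
    environ['recipe'][pos:pos] = entries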
def finalize_evaluation_environment(self, environ):
    super(SuspendJudgeContestControllerMixin, self) \
        .finalize_evaluation_environment(environ)
    add_after_recipe_entry(environ, 'mark_submission_in_progress',
            ('check_problem_instance_state',
             'oioioi.suspendjudge.handlers.check_problem_instance_state',
             dict(suspend_init_tests=True)))
    try:
        add_before_placeholder(environ, 'before_final_tests',
                ('check_problem_instance_state',
                 'oioioi.suspendjudge.handlers.check_problem_instance_state'))
    except IndexError:
        # The 'before_final_tests' placeholder may have been removed by
        # another controller; in that case there is nothing to hook.
        pass
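# Under the same assumed recipe layout as the sketch above,
# add_after_recipe_entry would insert directly after a named entry
# (here: 'mark_submission_in_progress'); again, the real helper is in
# oioioi.evalmgr and may differ in its details.
def add_after_recipe_entry(environ, name, entry):
    pos = [i for i, e in enumerate(environ['recipe'])
           if e[0] == name][0] + 1
    environ['recipe'].insert(pos, entry)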
def fill_evaluation_environ(self, environ, submission):
    submission = submission.programsubmission
    environ['source_file'] = \
        django_to_filetracker_path(submission.source_file)
    # The language is derived from the source file extension,
    # e.g. 'sol.cpp' -> 'cpp'.
    environ['language'] = \
        os.path.splitext(submission.source_file.name)[1][1:]
    super(ProgrammingContestController, self) \
        .fill_evaluation_environ(environ, submission)
    add_before_placeholder(environ, 'after_initial_tests',
            ('update_report_statuses',
             'oioioi.contests.handlers.update_report_statuses'))
    add_before_placeholder(environ, 'after_initial_tests',
            ('update_submission_score',
             'oioioi.contests.handlers.update_submission_score'))
def fill_evaluation_environ(self, environ, submission):
    submission = submission.programsubmission
    environ['source_file'] = \
        django_to_filetracker_path(submission.source_file)
    environ['language'] = self._get_language(submission.source_file)
    environ['compilation_result_size_limit'] = \
        self.get_compilation_result_size_limit()
    super(ProgrammingContestController, self) \
        .fill_evaluation_environ(environ, submission)
    add_before_placeholder(environ, 'after_initial_tests',
            ('update_report_statuses',
             'oioioi.contests.handlers.update_report_statuses'))
    add_before_placeholder(environ, 'after_initial_tests',
            ('update_submission_score',
             'oioioi.contests.handlers.update_submission_score'))
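# A hypothetical body for _get_language, mirroring the inline
# extension-based detection in the earlier variant of this method; the
# real implementation may additionally validate the extension against a
# registry of allowed languages.
import os

def _get_language(self, source_file):
    # 'sol.cpp' -> 'cpp'
    return os.path.splitext(source_file.name)[1][1:]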
def fill_evaluation_environ(self, environ, submission, **kwargs):
    super(SplitEvalContestControllerMixin, self) \
        .fill_evaluation_environ(environ, submission, **kwargs)
    if not self.use_spliteval(submission):
        return
    # Route the final (non-initial) judging to low-priority queues.
    environ.setdefault('sioworkers_extra_args', {}) \
        .setdefault('NORMAL', {})['queue'] = 'sioworkers-lowprio'
    try:
        add_before_placeholder(environ, 'before_final_tests',
                ('postpone_final',
                 'oioioi.evalmgr.handlers.postpone',
                 dict(queue='evalmgr-lowprio')))
    except IndexError:
        # This may happen if some controller modifies the evaluation
        # environment so that the before_final_tests placeholder is no
        # more. This happens for OI's test run functionality, among
        # others.
        pass
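# Illustration only: the nested setdefault chain above produces this
# shape on an otherwise empty environment (rest of the environ elided).
environ = {}
environ.setdefault('sioworkers_extra_args', {}) \
    .setdefault('NORMAL', {})['queue'] = 'sioworkers-lowprio'
assert environ == {
    'sioworkers_extra_args': {'NORMAL': {'queue': 'sioworkers-lowprio'}}}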