Example #1
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    tests_of_type = {}
    env.setdefault("tests", {})

    problem = Problem.objects.get(id=env["problem_id"])

    tests = Test.objects.filter(problem=problem)
    for test in tests:
        test_env = {}
        test_env["id"] = test.id
        test_env["name"] = test.name
        test_env["in_file"] = django_to_filetracker_path(test.input_file)
        test_env["hint_file"] = django_to_filetracker_path(test.output_file)
        test_env["kind"] = test.kind
        test_env["group"] = test.group or test.name
        test_env["max_score"] = test.max_score
        if test.time_limit:
            test_env["exec_time_limit"] = test.time_limit
        if test.memory_limit:
            test_env["exec_mem_limit"] = test.memory_limit
        env["tests"][test.name] = test_env

    return env
Example #2
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    tests_of_type = {}
    env.setdefault('tests', {})

    problem = Problem.objects.get(id=env['problem_id'])

    tests = Test.objects.filter(problem=problem)
    for test in tests:
        test_env = {}
        test_env['id'] = test.id
        test_env['name'] = test.name
        test_env['in_file'] = django_to_filetracker_path(test.input_file)
        test_env['hint_file'] = django_to_filetracker_path(test.output_file)
        test_env['kind'] = test.kind
        test_env['group'] = test.group or test.name
        test_env['max_score'] = test.max_score
        if test.time_limit:
            test_env['exec_time_limit'] = test.time_limit
        if test.memory_limit:
            test_env['exec_mem_limit'] = test.memory_limit
        env['tests'][test.name] = test_env

    return env
Example #3
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    env.setdefault('tests', {})

    problem = Problem.objects.get(id=env['problem_id'])

    tests = Test.objects.filter(problem=problem)
    for test in tests:
        test_env = {}
        test_env['id'] = test.id
        test_env['name'] = test.name
        test_env['in_file'] = django_to_filetracker_path(test.input_file)
        test_env['hint_file'] = django_to_filetracker_path(test.output_file)
        test_env['kind'] = test.kind
        test_env['group'] = test.group or test.name
        test_env['max_score'] = test.max_score
        if test.time_limit:
            test_env['exec_time_limit'] = test.time_limit
        if test.memory_limit:
            test_env['exec_mem_limit'] = test.memory_limit
        env['tests'][test.name] = test_env

    return env
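For orientation, the dictionary that collect_tests builds has the shape sketched below; the test names, ids, paths and limits are hypothetical and only illustrate the keys set in the loop above.

# Illustration only: a possible env['tests'] after collect_tests runs for
# a problem with one example test and one scored test (all values are
# made up; the limit keys appear only when the Test sets a limit).
example_env_tests = {
    '0a': {
        'id': 101,
        'name': '0a',
        'in_file': '/problems/42/0a.in',
        'hint_file': '/problems/42/0a.out',
        'kind': 'EXAMPLE',
        'group': '0a',
        'max_score': 0,
        'exec_time_limit': 1000,
        'exec_mem_limit': 66000,
    },
    '1a': {
        'id': 102,
        'name': '1a',
        'in_file': '/problems/42/1a.in',
        'hint_file': '/problems/42/1a.out',
        'kind': 'NORMAL',
        'group': '1',
        'max_score': 25,
    },
}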
Example #4
    def test_django_to_filetracker_path(self):
        storage = FiletrackerStorage(prefix='/foo', client=DummyClient())
        field = FileField(storage=storage)
        value = FieldFile(None, field, 'bar')
        self.assertEqual(django_to_filetracker_path(value), '/foo/bar')

        with self.assertRaises(ValueError):
            django_to_filetracker_path(ContentFile('whatever', name='gizmo'))

        self.assertEqual('/foo/bar', django_to_filetracker_path(
                filetracker_to_django_file('/foo/bar', storage=storage)))
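The assertions above pin down the helpers' contract: a FieldFile kept in a FiletrackerStorage with prefix '/foo' and name 'bar' maps to '/foo/bar', a file that is not stored in filetracker raises ValueError, and filetracker_to_django_file inverts the mapping. A self-contained sketch of that contract (not the real oioioi.filetracker.utils code) is:

# Sketch only -- it mirrors the behaviour asserted by the test, using a
# plain prefix/name pair instead of Django storage objects.
def sketch_to_filetracker_path(prefix, name):
    if prefix is None:
        # corresponds to passing a file that is not backed by
        # FiletrackerStorage, e.g. the ContentFile in the test
        raise ValueError('file is not stored in filetracker')
    return prefix.rstrip('/') + '/' + name.lstrip('/')

def sketch_to_django_name(prefix, ft_path):
    # inverse direction, as exercised by filetracker_to_django_file
    assert ft_path.startswith(prefix + '/')
    return ft_path[len(prefix) + 1:]

assert sketch_to_filetracker_path('/foo', 'bar') == '/foo/bar'
assert sketch_to_filetracker_path(
    '/foo', sketch_to_django_name('/foo', '/foo/bar')) == '/foo/bar'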
Example #6
    def fill_evaluation_environ(self, environ, submission, **kwargs):
        self.generate_base_environ(environ, submission, **kwargs)

        if 'USER_OUTS' in environ['submission_kind']:
            environ['report_kinds'] = ['USER_OUTS']
            environ['save_outputs'] = True

        recipe_body = self.generate_recipe(environ['report_kinds'])

        extend_after_placeholder(environ, 'after_compile', recipe_body)

        environ.setdefault('group_scorer',
                            'oioioi.programs.utils.min_group_scorer')
        environ.setdefault('score_aggregator',
                'oioioi.programs.utils.sum_score_aggregator')

        checker = OutputChecker.objects.get(problem=self.problem).exe_file
        if checker:
            environ['checker'] = django_to_filetracker_path(checker)

        if 'INITIAL' in environ['report_kinds']:
            add_before_placeholder(environ, 'after_initial_tests',
                    ('update_report_statuses',
                        'oioioi.contests.handlers.update_report_statuses'))
            add_before_placeholder(environ, 'after_initial_tests',
                    ('update_submission_score',
                        'oioioi.contests.handlers.update_submission_score'))
Example #7
    def _verify_inputs(self, tests):
        """Check if correct solution exits with code 0 on all tests.
           :raises: :class:`~oioioi.problems.package.ProblemPackageError`
           otherwise.
        """
        env = self._find_and_compile('inwer')
        if env and not self.use_make:
            jobs = {}

            for test in tests:
                job = env.copy()
                job['job_type'] = 'inwer'
                job['task_priority'] = TASK_PRIORITY
                job['exe_file'] = env['compiled_file']
                job['in_file'] = django_to_filetracker_path(test.input_file)
                job['use_sandboxes'] = self.use_sandboxes
                jobs[test.name] = job

            jobs = run_sioworkers_jobs(jobs)
            get_client().delete_file(env['compiled_file'])

            for test_name, job in six.iteritems(jobs):
                if job['result_code'] != 'OK':
                    raise ProblemPackageError(_("Inwer failed on test "
                        "%(test)s. Inwer output %(output)s") %
                        {
                            'test': test_name,
                            'output': '\n'.join(job['stdout'])}
                        )

            logger.info("%s: inwer success", self.filename)
Example #8
    def generate_initial_evaluation_environ(self, environ, submission,
                                            **kwargs):
        problem_instance = submission.problem_instance
        problem = problem_instance.problem
        contest = problem_instance.contest
        if contest is not None:
            round = problem_instance.round

        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = problem_instance.controller \
            ._get_language(submission.source_file, problem_instance)
        environ['compilation_result_size_limit'] = \
            problem_instance.controller \
                .get_compilation_result_size_limit(submission)

        environ['submission_id'] = submission.id
        environ['submission_kind'] = submission.kind
        environ['problem_instance_id'] = problem_instance.id
        environ['problem_id'] = problem.id
        environ['problem_short_name'] = problem.short_name
        if contest is not None:
            environ['round_id'] = round.id
            environ['contest_id'] = contest.id
        environ['submission_owner'] = submission.user.username \
                                      if submission.user else None

        environ.setdefault('report_kinds', ['INITIAL', 'NORMAL'])
        if 'hidden_judge' in environ['extra_args']:
            environ['report_kinds'] = ['HIDDEN']
Example #10
    def _verify_inputs(self, tests):
        """Check if correct solution exits with code 0 on all tests.
           :raises: :class:`~oioioi.problems.package.ProblemPackageError`
           otherwise.
        """
        env = self._find_and_compile('inwer')
        if env and not self.use_make:
            jobs = {}

            for test in tests:
                job = env.copy()
                job['job_type'] = 'inwer'
                job['task_priority'] = TASK_PRIORITY
                job['exe_file'] = env['compiled_file']
                job['in_file'] = django_to_filetracker_path(test.input_file)
                job['use_sandboxes'] = self.use_sandboxes
                jobs[test.name] = job

            jobs = run_sioworkers_jobs(jobs)
            get_client().delete_file(env['compiled_file'])

            for test_name, job in jobs.iteritems():
                if job['result_code'] != 'OK':
                    raise ProblemPackageError(
                        _("Inwer failed on test "
                          "%(test)s. Inwer output %(output)s") % {
                              'test': test_name,
                              'output': '\n'.join(job['stdout'])
                          })

            logger.info("%s: inwer success", self.filename)
Example #11
def add_extra_files(environ, problem, additional_args=None):
    try:
        config = ExtraConfig.objects.get(problem=problem) \
                 .parsed_config
    except ExtraConfig.DoesNotExist:
        config = {}

    lang = environ['language']
    extra_args = config.get('extra_compilation_args', {}).get(lang, [])
    if isinstance(extra_args, six.string_types):
        extra_args = [extra_args]
    if additional_args:
        extra_args.extend(additional_args.get(lang, []))
    if extra_args:
        environ['extra_compilation_args'] = extra_args

    extra_file_names = config.get('extra_compilation_files', [])
    extra_files = ExtraFile.objects.filter(problem=problem,
            name__in=extra_file_names).all()
    if len(extra_file_names) != len(extra_files):
        raise RuntimeError('Did not find expected extra files: '
                + ', '.join(extra_file_names))
    environ['extra_files'] = dict(
            (ef.name, django_to_filetracker_path(ef.file))
            for ef in extra_files)
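add_extra_files reads only two keys from parsed_config; a hypothetical value with the shape the code above expects (the language keys and file names are invented for illustration, not taken from a real package) might be:

# Hypothetical parsed_config consumed by add_extra_files above.
example_parsed_config = {
    # per-language compiler flags; a bare string is also accepted and is
    # wrapped in a list by add_extra_files
    'extra_compilation_args': {
        'cpp': ['-DEXTRA_FLAG'],
        'c': '-DEXTRA_FLAG',
    },
    # names of ExtraFile objects that must exist for the problem,
    # otherwise a RuntimeError is raised
    'extra_compilation_files': ['extralib.h'],
}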
Example #12
    def test_filetracker_to_django_field(self):
        data = 'eloziom'
        path = 'my/path'
        abspath = '/' + path

        storage = default_storage
        try:
            self.assertEqual(storage.save(path, ContentFile(data)), path)

            model = TestFileModel()
            # The file field ignores the preferred name, as we cannot copy
            # a file in filetracker to another location.
            with self.assertRaises(NotImplementedError):
                model.file_field.save(
                    'xx', filetracker_to_django_file(abspath, storage))

            model.file_field = filetracker_to_django_file(abspath, storage)
            model.save()
            self.assertEqual(model.file_field.name, path)
            pk = model.pk

            # Here the model is removed from Django's cache, so the query
            # below actually hits the database.
            del model

            model = TestFileModel.objects.get(pk=pk)
            self.assertEqual(model.file_field.name, path)
            self.assertEqual(django_to_filetracker_path(model.file_field),
                             abspath)
            self.assertEqual(model.file_field.read(), data)
        finally:
            default_storage.delete(path)
Example #13
    def _make_outs(self, outs_to_make):
        """Compiles the model solution and executes it in order to generate
        test outputs.

        :return: Result from workers.
        """
        env = self._find_and_compile('', command='outgen')
        if not env:
            return {}

        jobs = {}
        for outname, test in outs_to_make:
            job = env.copy()
            job['job_type'] = 'exec' if self.use_sandboxes else 'unsafe-exec'
            job['task_priority'] = TASK_PRIORITY
            job['exe_file'] = env['compiled_file']
            job['upload_out'] = True
            job['in_file'] = django_to_filetracker_path(test.input_file)
            job['out_file'] = outname
            if test.memory_limit:
                job['exec_mem_limit'] = test.memory_limit
            jobs[test.name] = job

        jobs = run_sioworkers_jobs(jobs)
        get_client().delete_file(env['compiled_file'])
        return jobs
Example #14
def make_test(env, **kwargs):
    """Creates a testcase *test* from the user input and converts it to
       an evaluation environment.

       Used ``environ`` keys:
         * ``submission_id``
         * ``problem_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """
    submission = TestRunProgramSubmission.objects.get(id=env['submission_id'])
    assert submission.kind == 'TESTRUN'
    config = TestRunConfig.objects.get(problem__id=env['problem_id'])

    test_env = {}
    test_env['name'] = 'test'
    test_env['in_file'] = django_to_filetracker_path(submission.input_file)
    test_env['out_file'] = '/testruns/%s/%d/%s-out' \
            % (env['contest_id'], env['submission_id'], env['job_id'])
    if config.time_limit:
        test_env['exec_time_limit'] = config.time_limit
    if config.memory_limit:
        test_env['exec_mem_limit'] = config.memory_limit

    env['tests'] = {'test': test_env}

    return env
Example #15
def add_extra_files(environ, problem, additional_args=None):
    try:
        config = ExtraConfig.objects.get(problem=problem).parsed_config
    except ExtraConfig.DoesNotExist:
        config = {}

    lang = environ['language']
    extra_args = config.get('extra_compilation_args', {}).get(lang, [])
    if isinstance(extra_args, six.string_types):
        extra_args = [extra_args]
    if additional_args:
        extra_args.extend(additional_args.get(lang, []))
    if extra_args:
        environ['extra_compilation_args'] = extra_args

    extra_file_names = config.get('extra_compilation_files', [])
    extra_files = ExtraFile.objects.filter(
        problem=problem, name__in=extra_file_names
    ).all()
    if len(extra_file_names) != len(extra_files):
        raise RuntimeError(
            'Did not find expected extra files: ' + ', '.join(extra_file_names)
        )
    environ['extra_files'] = dict(
        (ef.name, django_to_filetracker_path(ef.file)) for ef in extra_files
    )
Example #16
def make_test(env, submission, **kwargs):
    """Creates a testcase *test* from the user input and converts it to
       an evaluation environment.

       Used ``environ`` keys:
         * ``submission_id``
         * ``problem_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """
    assert submission.kind == 'TESTRUN'
    config = TestRunConfig.objects.get(problem__id=env['problem_id'])

    test_env = {}
    test_env['name'] = 'test'
    test_env['in_file'] = django_to_filetracker_path(submission.input_file)
    test_env['out_file'] = '/testruns/%s/%d/%s-out' \
            % (env['contest_id'], env['submission_id'], env['job_id'])
    test_env['to_judge'] = True
    if config.time_limit:
        test_env['exec_time_limit'] = config.time_limit
    if config.memory_limit:
        test_env['exec_mem_limit'] = config.memory_limit

    env['tests'] = {'test': test_env}

    return env
Example #17
    def test_filetracker_to_django_field(self):
        data = 'eloziom'
        path = 'my/path'
        abspath = '/' + path

        storage = default_storage
        try:
            self.assertEqual(storage.save(path, ContentFile(data)), path)

            model = TestFileModel()
            # The file field ignores the preferred name, as we cannot copy
            # a file in filetracker to another location.
            with self.assertRaises(NotImplementedError):
                model.file_field.save('xx',
                        filetracker_to_django_file(abspath, storage))

            model.file_field = filetracker_to_django_file(abspath, storage)
            model.save()
            self.assertEqual(model.file_field.name, path)
            pk = model.pk

            # Here the model is removed from Django's cache, so the query
            # below actually hits the database.
            del model

            model = TestFileModel.objects.get(pk=pk)
            self.assertEqual(model.file_field.name, path)
            self.assertEqual(django_to_filetracker_path(model.file_field),
                                abspath)
            self.assertEqual(model.file_field.read(), data)
        finally:
            default_storage.delete(path)
Example #18
    def fill_evaluation_environ(self, environ, submission, **kwargs):
        self.generate_base_environ(environ, submission, **kwargs)

        if 'USER_OUTS' in environ['submission_kind']:
            environ['report_kinds'] = ['USER_OUTS']
            environ['save_outputs'] = True

        recipe_body = self.generate_recipe(environ['report_kinds'])

        extend_after_placeholder(environ, 'after_compile', recipe_body)

        environ.setdefault('group_scorer',
                           'oioioi.programs.utils.min_group_scorer')
        environ.setdefault('score_aggregator',
                           'oioioi.programs.utils.sum_score_aggregator')

        checker = OutputChecker.objects.get(problem=self.problem).exe_file
        if checker:
            environ['checker'] = django_to_filetracker_path(checker)

        if 'INITIAL' in environ['report_kinds']:
            add_before_placeholder(
                environ, 'after_initial_tests',
                ('update_report_statuses',
                 'oioioi.contests.handlers.update_report_statuses'))
            add_before_placeholder(
                environ, 'after_initial_tests',
                ('update_submission_score',
                 'oioioi.contests.handlers.update_submission_score'))
Example #19
    def fill_evaluation_environ(self, environ, submission):
        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = self._get_language(submission.source_file)
        environ['compilation_result_size_limit'] = \
            self.get_compilation_result_size_limit()

        super(ProgrammingContestController,
                self).fill_evaluation_environ(environ, submission)

        self.fill_evaluation_environ_post_problem(environ, submission)
Example #20
    def fill_evaluation_environ(self, problem, environ, **kwargs):
        environ['recipe'] = [
            ('compile', 'oioioi.programs.handlers.compile'),
            recipe_placeholder('after_compile'),
            ('collect_tests', 'oioioi.programs.handlers.collect_tests'),
            ('initial_run_tests', 'oioioi.programs.handlers.run_tests',
             dict(kind='EXAMPLE')),
            ('initial_grade_tests', 'oioioi.programs.handlers.grade_tests'),
            ('initial_grade_groups', 'oioioi.programs.handlers.grade_groups'),
            ('initial_grade_submission',
             'oioioi.programs.handlers.grade_submission'),
            ('initial_make_report', 'oioioi.programs.handlers.make_report',
             dict(kind='INITIAL')),
            recipe_placeholder('after_initial_tests'),

            #                ('postpone_final',
            #                    'oioioi.programs.handlers.postpone',
            #                    dict(queue='evalmgr-lowprio')),
            ('final_run_tests', 'oioioi.programs.handlers.run_tests',
             dict(kind='NORMAL')),
            ('final_grade_tests', 'oioioi.programs.handlers.grade_tests'),
            ('final_grade_groups', 'oioioi.programs.handlers.grade_groups'),
            ('final_grade_submission',
             'oioioi.programs.handlers.grade_submission'),
            ('final_make_report', 'oioioi.programs.handlers.make_report'),
            recipe_placeholder('after_final_tests'),
            ('delete_executable',
             'oioioi.programs.handlers.delete_executable'),
        ]

        environ['error_handlers'] = [
            ('delete_executable', 'oioioi.programs.handlers.delete_executable')
        ]

        environ['group_scorer'] = \
                'oioioi.programs.utils.min_group_scorer'
        environ['score_aggregator'] = \
                'oioioi.programs.utils.sum_score_aggregator'

        checker = OutputChecker.objects.get(problem=problem).exe_file
        if checker:
            environ['checker'] = django_to_filetracker_path(checker)

        if getattr(settings, 'USE_UNSAFE_EXEC', False):
            environ['exec_mode'] = 'unsafe'
        else:
            environ['exec_mode'] = settings.SAFE_EXEC_MODE

        if getattr(settings, 'USE_LOCAL_COMPILERS', False):
            environ['compiler'] = 'system-' + environ['language']
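Each recipe entry above is a (name, dotted handler path) pair, optionally followed by a kwargs dict, and the handlers shown elsewhere in these examples (collect_tests, make_test) take the environ and return it. A rough sketch of a driver for such a recipe (not the actual evaluation manager) could look like:

from importlib import import_module

def sketch_run_recipe(environ):
    # Sketch only: assumes the recipe placeholders have already been
    # expanded (see extend_after_placeholder / add_before_placeholder in
    # the other examples), so every entry names a concrete handler.
    for entry in environ['recipe']:
        dotted_path = entry[1]
        kwargs = entry[2] if len(entry) > 2 else {}
        module_name, func_name = dotted_path.rsplit('.', 1)
        handler = getattr(import_module(module_name), func_name)
        environ = handler(environ, **kwargs)
    return environ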
Example #21
    def generate_initial_evaluation_environ(self, environ, submission,
                                            **kwargs):
        problem_instance = submission.problem_instance
        problem = problem_instance.problem
        contest = problem_instance.contest
        if contest is not None:
            round = problem_instance.round

        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = problem_instance.controller \
            ._get_language(submission.source_file, problem_instance)
        environ['compilation_result_size_limit'] = \
            problem_instance.controller \
                .get_compilation_result_size_limit(submission)

        environ['submission_id'] = submission.id
        environ['submission_kind'] = submission.kind
        environ['problem_instance_id'] = problem_instance.id
        environ['problem_id'] = problem.id
        environ['problem_short_name'] = problem.short_name
        if contest is not None:
            environ['round_id'] = round.id
            environ['contest_id'] = contest.id
        environ['submission_owner'] = submission.user.username \
                                      if submission.user else None
        environ['oioioi_instance'] = settings.SITE_NAME
        environ['contest_priority'] = contest.judging_priority \
            if contest is not None else settings.NON_CONTEST_PRIORITY
        environ['contest_priority'] += settings.OIOIOI_INSTANCE_PRIORITY_BONUS
        environ['contest_weight'] = contest.judging_weight \
            if contest is not None else settings.NON_CONTEST_WEIGHT
        environ['contest_weight'] += settings.OIOIOI_INSTANCE_WEIGHT_BONUS

        environ.setdefault('report_kinds', ['INITIAL', 'NORMAL'])
        if 'hidden_judge' in environ['extra_args']:
            environ['report_kinds'] = ['HIDDEN']

        environ['compiler'] = self.get_compiler_for_submission(submission)
        if contest:
            contest_compiler = contest.controller \
                .get_compiler_for_submission(submission)
            if contest_compiler:
                # contest compiler is more important than problem compiler
                environ['compiler'] = contest_compiler

        if getattr(settings, 'USE_LOCAL_COMPILERS', False):
            environ['compiler'] = 'system-' + environ['language']
Example #22
    def fill_evaluation_environ(self, environ, **kwargs):
        self.generate_base_environ(environ, **kwargs)

        recipe_body = self.generate_recipe(environ['report_kinds'])

        extend_after_placeholder(environ, 'after_compile', recipe_body)

        environ.setdefault('group_scorer',
                            'oioioi.programs.utils.min_group_scorer')
        environ.setdefault('score_aggregator',
                'oioioi.programs.utils.sum_score_aggregator')

        checker = OutputChecker.objects.get(problem=self.problem).exe_file
        if checker:
            environ['checker'] = django_to_filetracker_path(checker)
Example #23
    def fill_evaluation_environ(self, environ, submission):
        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = \
            os.path.splitext(submission.source_file.name)[1][1:]

        super(ProgrammingContestController,
                self).fill_evaluation_environ(environ, submission)

        add_before_placeholder(environ, 'after_initial_tests',
                ('update_report_statuses',
                    'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_submission_score',
                    'oioioi.contests.handlers.update_submission_score'))
Example #24
    def generate_initial_evaluation_environ(self, environ, submission, **kwargs):
        problem_instance = submission.problem_instance
        problem = problem_instance.problem
        contest = problem_instance.contest
        if contest is not None:
            round = problem_instance.round

        submission = submission.programsubmission
        environ['source_file'] = django_to_filetracker_path(submission.source_file)
        environ['language'] = get_extension(submission.source_file.name)
        environ[
            'compilation_result_size_limit'
        ] = problem_instance.controller.get_compilation_result_size_limit(submission)

        environ['submission_id'] = submission.id
        environ['submission_kind'] = submission.kind
        environ['problem_instance_id'] = problem_instance.id
        environ['problem_id'] = problem.id
        environ['problem_short_name'] = problem.short_name
        if contest is not None:
            environ['round_id'] = round.id
            environ['contest_id'] = contest.id
        environ['submission_owner'] = (
            submission.user.username if submission.user else None
        )
        environ['oioioi_instance'] = settings.SITE_NAME
        environ['contest_priority'] = (
            contest.judging_priority
            if contest is not None
            else settings.NON_CONTEST_PRIORITY
        )
        environ['contest_priority'] += settings.OIOIOI_INSTANCE_PRIORITY_BONUS
        environ['contest_weight'] = (
            contest.judging_weight
            if contest is not None
            else settings.NON_CONTEST_WEIGHT
        )
        environ['contest_weight'] += settings.OIOIOI_INSTANCE_WEIGHT_BONUS

        environ.setdefault('report_kinds', ['INITIAL', 'NORMAL'])
        if 'hidden_judge' in environ['extra_args']:
            environ['report_kinds'] = ['HIDDEN']

        environ['compiler'] = problem_instance.controller.get_compiler_for_submission(
            submission
        )
Example #25
    def fill_evaluation_environ(self, environ, submission):
        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = self._get_language(submission.source_file)
        environ['compilation_result_size_limit'] = \
            self.get_compilation_result_size_limit()

        super(ProgrammingContestController,
                self).fill_evaluation_environ(environ, submission)

        add_before_placeholder(environ, 'after_initial_tests',
                ('update_report_statuses',
                    'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(environ, 'after_initial_tests',
                ('update_submission_score',
                    'oioioi.contests.handlers.update_submission_score'))
Example #26
    def fill_evaluation_environ(self, environ, submission):
        submission = submission.programsubmission
        environ['source_file'] = \
            django_to_filetracker_path(submission.source_file)
        environ['language'] = \
            os.path.splitext(submission.source_file.name)[1][1:]

        super(ProgrammingContestController,
              self).fill_evaluation_environ(environ, submission)

        add_before_placeholder(
            environ, 'after_initial_tests',
            ('update_report_statuses',
             'oioioi.contests.handlers.update_report_statuses'))
        add_before_placeholder(
            environ, 'after_initial_tests',
            ('update_submission_score',
             'oioioi.contests.handlers.update_submission_score'))
Example #27
    def _make_outs(self, outs_to_make):
        env = self._find_and_compile('', command='outgen')
        if not env:
            return {}

        jobs = {}
        for outname, test in outs_to_make:
            job = env.copy()
            job['job_type'] = 'exec' if self.use_sandboxes else 'unsafe-exec'
            job['exe_file'] = env['compiled_file']
            job['upload_out'] = True
            job['in_file'] = django_to_filetracker_path(test.input_file)
            job['out_file'] = outname
            jobs[test.name] = job

        jobs = run_sioworkers_jobs(jobs)
        get_client().delete_file(env['compiled_file'])
        return jobs
Example #28
    def fill_evaluation_environ(self, problem, environ, **kwargs):
        super(SinolProblemController, self).fill_evaluation_environ(problem,
                environ, **kwargs)

        try:
            config = ExtraConfig.objects.get(problem=problem).parsed_config
        except ExtraConfig.DoesNotExist:
            config = {}

        lang = environ['language']
        environ['extra_compilation_args'] = \
                str(config.get('extra_compilation_args', {}).get(lang, ''))

        extra_file_names = config.get('extra_compilation_files', ())
        extra_files = ExtraFile.objects.filter(problem=problem,
                name__in=extra_file_names).all()
        if len(extra_file_names) != len(extra_files):
            raise RuntimeError('Did not find expected extra files: '
                    + ', '.join(extra_file_names))
        environ['extra_files'] = dict(
                (ef.name, django_to_filetracker_path(ef.file))
                for ef in extra_files)
Example #29
    def fill_evaluation_environ(self, problem, environ, **kwargs):
        super(SinolProblemController,
              self).fill_evaluation_environ(problem, environ, **kwargs)

        try:
            config = ExtraConfig.objects.get(problem=problem).parsed_config
        except ExtraConfig.DoesNotExist:
            config = {}

        lang = environ['language']
        environ['extra_compilation_args'] = \
                str(config.get('extra_compilation_args', {}).get(lang, ''))

        extra_file_names = config.get('extra_compilation_files', ())
        extra_files = ExtraFile.objects.filter(
            problem=problem, name__in=extra_file_names).all()
        if len(extra_file_names) != len(extra_files):
            raise RuntimeError('Did not find expected extra files: ' +
                               ', '.join(extra_file_names))
        environ['extra_files'] = dict(
            (ef.name, django_to_filetracker_path(ef.file))
            for ef in extra_files)
Example #30
    def _make_outs(self, outs_to_make):
        """Run jobs to generate test outputs.
           :return: Result from workers.
        """
        env = self._find_and_compile('', command='outgen')
        if not env:
            return {}

        jobs = {}
        for outname, test in outs_to_make:
            job = env.copy()
            job['job_type'] = 'exec' if self.use_sandboxes else 'unsafe-exec'
            job['task_priority'] = TASK_PRIORITY
            job['exe_file'] = env['compiled_file']
            job['upload_out'] = True
            job['in_file'] = django_to_filetracker_path(test.input_file)
            job['out_file'] = outname
            if test.memory_limit:
                job['exec_mem_limit'] = test.memory_limit
            jobs[test.name] = job

        jobs = run_sioworkers_jobs(jobs)
        get_client().delete_file(env['compiled_file'])
        return jobs
Example #31
    def _verify_ins(self, tests):
        env = self._find_and_compile('inwer')
        if env and not self.use_make:
            jobs = {}

            for test in tests:
                job = env.copy()
                job['job_type'] = 'inwer'
                job['exe_file'] = env['compiled_file']
                job['in_file'] = django_to_filetracker_path(test.input_file)
                job['use_sandboxes'] = self.use_sandboxes
                jobs[test.name] = job

            jobs = run_sioworkers_jobs(jobs)
            get_client().delete_file(env['compiled_file'])

            for test_name, job in jobs.iteritems():
                if job['result_code'] != 'OK':
                    raise ProblemPackageError(_("Inwer failed on test "
                        "%(test)s. Inwer output %(output)s") %
                        {'test': test_name, 'output': '\n'.join(job['stdout'])}
                        )

            logger.info("%s: inwer success", self.filename)
Example #32
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_instance_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    env.setdefault('tests', {})

    if 'tests_subset' in env['extra_args']:
        tests = list(
            Test.objects.in_bulk(env['extra_args']['tests_subset']).values())
    else:
        tests = Test.objects.filter(
            problem_instance__id=env['problem_instance_id'], is_active=True)

    problem_instance = env['problem_instance_id']
    if env['is_rejudge']:
        submission = env['submission_id']
        rejudge_type = env['extra_args'].setdefault('rejudge_type', 'FULL')
        tests_to_judge = env['extra_args'].setdefault('tests_to_judge', [])
        test_reports = TestReport.objects.filter(
            submission_report__submission__id=submission,
            submission_report__status='ACTIVE')
        tests_used = [report.test_name for report in test_reports]
        if rejudge_type == 'NEW':
            tests_to_judge = [
                t.name for t in Test.objects.filter(
                    problem_instance__id=problem_instance,
                    is_active=True).exclude(name__in=tests_used)
            ]
        elif rejudge_type == 'JUDGED':
            tests = Test.objects.filter(problem_instance__id=problem_instance,
                                        name__in=tests_used)
            tests_to_judge = [t for t in tests_to_judge if t in tests_used]
        elif rejudge_type == 'FULL':
            tests_to_judge = [t.name for t in tests]
    else:
        tests_to_judge = [t.name for t in tests]

    for test in tests:
        test_env = {}
        test_env['id'] = test.id
        test_env['name'] = test.name
        test_env['in_file'] = django_to_filetracker_path(test.input_file)
        test_env['hint_file'] = django_to_filetracker_path(test.output_file)
        test_env['kind'] = test.kind
        test_env['group'] = test.group or test.name
        test_env['max_score'] = test.max_score
        test_env['order'] = test.order
        if test.time_limit:
            test_env['exec_time_limit'] = test.time_limit
        if test.memory_limit:
            test_env['exec_mem_limit'] = test.memory_limit
        test_env['to_judge'] = False
        env['tests'][test.name] = test_env

    for test in tests_to_judge:
        env['tests'][test]['to_judge'] = True
    return env
Example #33
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_instance_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    env.setdefault('tests', {})

    if 'tests_subset' in env['extra_args']:
        tests = Test.objects.in_bulk(
            env['extra_args']['tests_subset']).values()
    else:
        tests = Test.objects.filter(
            problem_instance__id=env['problem_instance_id'],
            is_active=True)

    problem_instance = env['problem_instance_id']
    if env['is_rejudge']:
        submission = env['submission_id']
        rejudge_type = env['extra_args'].setdefault('rejudge_type', 'FULL')
        tests_to_judge = env['extra_args'].setdefault('tests_to_judge', [])
        test_reports = TestReport.objects.filter(
            submission_report__submission__id=submission,
            submission_report__status='ACTIVE')
        tests_used = [report.test_name for report in test_reports]
        if rejudge_type == 'NEW':
            tests_to_judge = [t.name for t in
                              Test.objects.filter(
                                  problem_instance__id=problem_instance,
                                  is_active=True)
                              .exclude(name__in=tests_used)]
        elif rejudge_type == 'JUDGED':
            tests = Test.objects.filter(
                problem_instance__id=problem_instance,
                name__in=tests_used)
            tests_to_judge = [t for t in tests_to_judge if t in tests_used]
        elif rejudge_type == 'FULL':
            tests_to_judge = [t.name for t in tests]
    else:
        tests_to_judge = [t.name for t in tests]

    for test in tests:
        test_env = {}
        test_env['id'] = test.id
        test_env['name'] = test.name
        test_env['in_file'] = django_to_filetracker_path(test.input_file)
        test_env['hint_file'] = django_to_filetracker_path(test.output_file)
        test_env['kind'] = test.kind
        test_env['group'] = test.group or test.name
        test_env['max_score'] = test.max_score
        test_env['order'] = test.order
        if test.time_limit:
            test_env['exec_time_limit'] = test.time_limit
        if test.memory_limit:
            test_env['exec_mem_limit'] = test.memory_limit
        test_env['to_judge'] = False
        env['tests'][test.name] = test_env

    for test in tests_to_judge:
        env['tests'][test]['to_judge'] = True
    return env
Example #34
def collect_tests(env, **kwargs):
    """Collects tests from the database and converts them to
       evaluation environments.

       Used ``environ`` keys:
         * ``problem_instance_id``

       Produced ``environ`` keys:
          * ``tests``: a dictionary mapping test names to test envs
    """

    env.setdefault("tests", {})

    if "tests_subset" in env["extra_args"]:
        tests = Test.objects.in_bulk(env["extra_args"]["tests_subset"]).values()
    else:
        tests = Test.objects.filter(problem_instance__id=env["problem_instance_id"], is_active=True)

    problem_instance = env["problem_instance_id"]
    if env["is_rejudge"]:
        submission = env["submission_id"]
        rejudge_type = env["extra_args"].setdefault("rejudge_type", "FULL")
        tests_to_judge = env["extra_args"].setdefault("tests_to_judge", [])
        test_reports = TestReport.objects.filter(
            submission_report__submission__id=submission, submission_report__status="ACTIVE"
        )
        tests_used = [report.test_name for report in test_reports]
        if rejudge_type == "NEW":
            tests_to_judge = [
                t.name
                for t in Test.objects.filter(problem_instance__id=problem_instance, is_active=True).exclude(
                    name__in=tests_used
                )
            ]
        elif rejudge_type == "JUDGED":
            tests = Test.objects.filter(problem_instance__id=problem_instance, name__in=tests_used)
            tests_to_judge = [t for t in tests_to_judge if t in tests_used]
        elif rejudge_type == "FULL":
            tests_to_judge = [t.name for t in tests]
    else:
        tests_to_judge = [t.name for t in tests]

    for test in tests:
        test_env = {}
        test_env["id"] = test.id
        test_env["name"] = test.name
        test_env["in_file"] = django_to_filetracker_path(test.input_file)
        test_env["hint_file"] = django_to_filetracker_path(test.output_file)
        test_env["kind"] = test.kind
        test_env["group"] = test.group or test.name
        test_env["max_score"] = test.max_score
        test_env["order"] = test.order
        if test.time_limit:
            test_env["exec_time_limit"] = test.time_limit
        if test.memory_limit:
            test_env["exec_mem_limit"] = test.memory_limit
        test_env["to_judge"] = False
        env["tests"][test.name] = test_env

    for test in tests_to_judge:
        env["tests"][test]["to_judge"] = True
    return env
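The three rejudge_type branches above differ only in how tests_to_judge is derived; a small worked illustration (with hypothetical test names) follows.

# Illustration only: the active tests are t1..t4, an earlier ACTIVE report
# judged t1 and t2 (tests_used == ['t1', 't2']), and the rejudge request
# asked for ['t2', 't3'] via extra_args['tests_to_judge'].
active = ['t1', 't2', 't3', 't4']
tests_used = ['t1', 't2']
requested = ['t2', 't3']

full = active                                        # 'FULL'
new = [t for t in active if t not in tests_used]     # 'NEW'    -> ['t3', 't4']
judged = [t for t in requested if t in tests_used]   # 'JUDGED' -> ['t2']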
Example #35
    def get_submission_source(self, out_file_path, source):
        ft_file = django_to_filetracker_path(source)
        self.filetracker.get_file(ft_file, out_file_path, add_to_cache=False)
Example #37
    def fill_evaluation_environ(self, problem, environ, **kwargs):
        environ['recipe'] = [
                ('compile',
                    'oioioi.programs.handlers.compile'),
                recipe_placeholder('after_compile'),

                ('collect_tests',
                    'oioioi.programs.handlers.collect_tests'),

                ('initial_run_tests',
                    'oioioi.programs.handlers.run_tests',
                    dict(kind='EXAMPLE')),
                ('initial_grade_tests',
                    'oioioi.programs.handlers.grade_tests'),
                ('initial_grade_groups',
                    'oioioi.programs.handlers.grade_groups'),
                ('initial_grade_submission',
                    'oioioi.programs.handlers.grade_submission'),
                ('initial_make_report',
                    'oioioi.programs.handlers.make_report',
                    dict(kind='INITIAL')),
                recipe_placeholder('after_initial_tests'),

#                ('postpone_final',
#                    'oioioi.programs.handlers.postpone',
#                    dict(queue='evalmgr-lowprio')),

                ('final_run_tests',
                    'oioioi.programs.handlers.run_tests',
                    dict(kind='NORMAL')),
                ('final_grade_tests',
                    'oioioi.programs.handlers.grade_tests'),
                ('final_grade_groups',
                    'oioioi.programs.handlers.grade_groups'),
                ('final_grade_submission',
                    'oioioi.programs.handlers.grade_submission'),
                ('final_make_report',
                    'oioioi.programs.handlers.make_report'),
                recipe_placeholder('after_final_tests'),

                ('delete_executable',
                    'oioioi.programs.handlers.delete_executable'),
            ]

        environ['error_handlers'] = [('delete_executable',
                'oioioi.programs.handlers.delete_executable')]

        environ['group_scorer'] = \
                'oioioi.programs.utils.min_group_scorer'
        environ['score_aggregator'] = \
                'oioioi.programs.utils.sum_score_aggregator'

        checker = OutputChecker.objects.get(problem=problem).exe_file
        if checker:
            environ['checker'] = django_to_filetracker_path(checker)

        if getattr(settings, 'USE_UNSAFE_EXEC', False):
            environ['exec_mode'] = 'unsafe'
        else:
            environ['exec_mode'] = settings.SAFE_EXEC_MODE

        if getattr(settings, 'USE_LOCAL_COMPILERS', False):
            environ['compiler'] = 'system-' + environ['language']