    def execute(self, line):
        args = self.arg_parser.parse_args(line)

        problem_id = args.problem_id
        language_id = args.language_id
        time_limit = args.time_limit
        memory_limit = args.memory_limit
        source_file = args.source_file

        if language_id not in executors:
            source_file = language_id
            language_id = None  # source file / language id optional

        if problem_id not in map(itemgetter(0),
                                 judgeenv.get_supported_problems()):
            raise InvalidCommandException("unknown problem '%s'" % problem_id)
        elif not language_id:
            if source_file:
                filename, dot, ext = source_file.partition('.')
                if not ext:
                    raise InvalidCommandException('invalid file name')
                else:
                    # TODO: this should be a proper lookup elsewhere
                    ext = ext.upper()
                    language_id = {
                        'PY': 'PY2',
                        'CPP': 'CPP11',
                        'JAVA': 'JAVA8'
                    }.get(ext, ext)
            else:
                raise InvalidCommandException("no language is selected")
        elif language_id not in executors:
            raise InvalidCommandException("unknown language '%s'" %
                                          language_id)
        elif time_limit <= 0:
            raise InvalidCommandException('--time-limit must be > 0')
        elif memory_limit <= 0:
            raise InvalidCommandException('--memory-limit must be > 0')

        src = self.get_source(
            source_file) if source_file else self.open_editor(language_id)

        self.judge.submission_id_counter += 1
        self.judge.graded_submissions.append(
            (problem_id, language_id, src, time_limit, memory_limit))
        self.judge.begin_grading(
            Submission(self.judge.submission_id_counter, problem_id,
                       language_id, src, time_limit, memory_limit, False, {}),
            blocking=True,
            report=print,
        )
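Illustrative only: how the ad-hoc extension lookup above resolves a source filename into a language id. The mapping is the TODO dict from the code; unknown extensions fall through as their upper-cased form, and the values below are made up.

# Mirrors the lookup above; note that partition() splits at the *first* dot,
# so a name like 'a.b.cpp' would yield ext == 'b.cpp'.
filename, dot, ext = 'solution.cpp'.partition('.')
language_id = {'PY': 'PY2', 'CPP': 'CPP11', 'JAVA': 'JAVA8'}.get(ext.upper(), ext.upper())
# language_id == 'CPP11'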
Example #2
    def execute(self, line: str) -> None:
        args = self.arg_parser.parse_args(line)

        problem_id: str = args.problem_id
        language_id: Optional[str] = args.language_id
        time_limit: float = args.time_limit
        memory_limit: int = args.memory_limit
        source_file: Optional[str] = args.source_file

        if language_id not in executors.executors:
            source_file = language_id
            language_id = None  # source file / language id optional

        if problem_id not in judgeenv.get_supported_problems():
            raise InvalidCommandException(f"unknown problem '{problem_id}'")
        elif not language_id:
            if source_file:
                language_id = executors.from_filename(
                    source_file).Executor.name
            else:
                raise InvalidCommandException('no language is selected')
        elif language_id not in executors.executors:
            raise InvalidCommandException(f"unknown language '{language_id}'")
        elif time_limit <= 0:
            raise InvalidCommandException('--time-limit must be > 0')
        elif memory_limit <= 0:
            raise InvalidCommandException('--memory-limit must be > 0')

        assert language_id is not None
        src = self.get_source(
            source_file) if source_file else self.open_editor(language_id)

        self.judge.submission_id_counter += 1
        self.judge.graded_submissions.append(
            (problem_id, language_id, src, time_limit, memory_limit))
        try:
            self.judge.begin_grading(
                Submission(self.judge.submission_id_counter, problem_id,
                           language_id, src, time_limit, memory_limit, False,
                           {}),
                blocking=True,
                report=print,
            )
        except KeyboardInterrupt:
            self.judge.abort_grading()
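The positional Submission(...) calls above follow the field order spelled out by the keyword call in the next example: id, problem_id, language, source, time_limit, memory_limit, short_circuit, meta. A minimal sketch with made-up values:

from dmoj.judge import Submission  # same import used in the packet handler below

# Illustrative only; field order inferred from the keyword call in Example #3.
sub = Submission(1, 'aplusb', 'PY2', 'print(sum(map(int, input().split())))',
                 2.0, 65536, False, {})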
Example #3
    def _receive_packet(self, packet: dict):
        # Dispatch an incoming packet from the site by its 'name' field.
        name = packet['name']
        if name == 'ping':
            self.ping_packet(packet['when'])
        elif name == 'get-current-submission':
            self.current_submission_packet()
        elif name == 'submission-request':
            self.submission_acknowledged_packet(packet['submission-id'])
            from dmoj.judge import Submission

            self.judge.begin_grading(
                Submission(
                    id=packet['submission-id'],
                    problem_id=packet['problem-id'],
                    language=packet['language'],
                    source=packet['source'],
                    time_limit=float(packet['time-limit']),
                    memory_limit=int(packet['memory-limit']),
                    short_circuit=packet['short-circuit'],
                    meta=packet['meta'],
                ))
            self._batch = 0
            log.info(
                'Accept submission: %d: executor: %s, code: %s',
                packet['submission-id'],
                packet['language'],
                packet['problem-id'],
            )
        elif name == 'terminate-submission':
            log.info('Received abort request for %s',
                     self.judge.current_submission.id)
            self.judge.abort_grading()
        elif name == 'disconnect':
            log.info('Received disconnect request, shutting down...')
            self.disconnect()
        else:
            log.error('Unknown packet %s, payload %s', name, packet)
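For reference, a 'submission-request' packet as consumed by the handler above might look roughly like this; the keys are the ones the handler reads, the values are made up.

# Illustrative only: shape of a 'submission-request' packet.
packet = {
    'name': 'submission-request',
    'submission-id': 1000001,
    'problem-id': 'aplusb',
    'language': 'PY3',
    'source': 'print(sum(map(int, input().split())))',
    'time-limit': 2.0,
    'memory-limit': 262144,
    'short-circuit': False,
    'meta': {},
}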
Example #4
        points_all, points_cases = self.parse_expect(
            config.get('points'), config.get('points_cases', {}), self.parse_points
        )
        feedback_all, feedback_cases = self.parse_expect(
            config.get('feedback'), config.get('feedback_cases', {}), self.parse_feedback
        )

        def output_case(data):
            self.output('\t\t' + data.strip())

        fails = 0
        for source in sources:
            self.sub_id += 1
            self.manager.set_expected(codes_all, codes_cases, points_all, points_cases, feedback_all, feedback_cases)
            self.judge.begin_grading(
                Submission(self.sub_id, problem, language, source, time, memory, False, {}),
                blocking=True,
                report=output_case,
            )
            fails += self.manager.failed
        return fails

    def parse_expect(self, all, cases, func):
        # Parse the overall expectation plus per-case overrides; a list of
        # overrides is keyed by 1-based case number, a dict is used as given.
        expect = func(all)
        if isinstance(cases, list):
            cases = enumerate(cases, 1)
        else:
            cases = cases.items()
        case_expect = {id: func(codes) for id, codes in cases}
        return expect, case_expect
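A minimal usage sketch of parse_expect, called from inside the test runner class and assuming an identity parse function; it shows how a list of per-case values is keyed by 1-based case number.

# Illustrative only (identity parse function assumed).
expect, case_expect = self.parse_expect('AC', ['AC', 'WA'], lambda x: x)
# expect == 'AC'; case_expect == {1: 'AC', 2: 'WA'}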
Example #5
    def _run_test_case(self, problem, case_dir, config):
        if 'skip' in config and config['skip']:
            self.output(
                ansi_style(
                    '\t\t#ansi[Skipped](magenta|bold) - Unsupported on current platform'
                ))
            return 0

        language = config['language']
        if language not in all_executors:
            self.output(
                ansi_style(
                    '\t\t#ansi[Skipped](magenta|bold) - Language not supported'
                ))
            return 0
        time = config['time']
        memory = config['memory']
        if isinstance(config['source'], str):
            with open(os.path.join(case_dir, config['source'])) as f:
                sources = [f.read()]
        else:
            sources = []
            for file in config['source']:
                with open(os.path.join(case_dir, file)) as f:
                    sources += [f.read()]
        codes_all, codes_cases = self.parse_expect(config.get('expect', 'AC'),
                                                   config.get('cases', {}),
                                                   self.parse_expected_codes)
        score_all, score_cases = self.parse_expect(
            config.get('score'), config.get('score_cases', {}),
            self.parse_score)
        feedback_all, feedback_cases = self.parse_expect(
            config.get('feedback'), config.get('feedback_cases', {}),
            self.parse_feedback)
        extended_feedback_all, extended_feedback_cases = self.parse_expect(
            config.get('extended_feedback'),
            config.get('extended_feedback_cases', {}), self.parse_feedback)

        def output_case(data):
            self.output('\t\t' + data.strip())

        fails = 0
        for source in sources:
            self.sub_id += 1
            self.manager.set_expected(
                codes_all,
                codes_cases,
                score_all,
                score_cases,
                feedback_all,
                feedback_cases,
                extended_feedback_all,
                extended_feedback_cases,
            )
            self.judge.begin_grading(
                Submission(self.sub_id, problem, language, source, time,
                           memory, False, {}),
                blocking=True,
                report=output_case,
            )
            fails += self.manager.failed
        return fails
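For orientation, a per-case test config consumed by _run_test_case above could look roughly like the following; the keys are the ones the code reads ('skip', 'score', 'feedback', and 'extended_feedback' variants are optional), and the values are illustrative only.

# Illustrative only: a minimal config dict for _run_test_case.
config = {
    'language': 'PY3',
    'source': 'aplusb.py',   # or a list of source filenames
    'time': 2,
    'memory': 65536,
    'expect': 'AC',
    'cases': {},             # optional per-case expected-code overrides
}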