def test_problem(self, problem, test_dir):
    """Run every matching test case directory under `test_dir` for `problem`.

    Returns the number of failed cases. Bug fix: this function previously
    fell off the end and returned None, so the caller's failure count was
    silently wrong.
    """
    self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem)
    self.output()
    fails = 0
    for case in os.listdir(test_dir):
        # Honor the optional case-filter regex.
        if self.case_regex is not None and not self.case_regex.match(case):
            continue
        case_dir = os.path.join(test_dir, case)
        if os.path.isdir(case_dir):
            self.output(ansi_style('Running test case #ansi[%s](yellow|bold) for #ansi[%s](cyan|bold)...')
                        % (case, problem))
            try:
                case_fails = self.run_test_case(problem, case, case_dir)
            except Exception:
                # A crash in the harness itself counts as one failure.
                fails += 1
                self.output(ansi_style('#ansi[Test case failed with exception:](red|bold)'))
                self.output(traceback.format_exc())
            else:
                self.output(ansi_style('Result of case #ansi[%s](yellow|bold) for #ansi[%s](cyan|bold): ')
                            % (case, problem) +
                            ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][not case_fails]))
                fails += case_fails
            self.output()
    return fails  # bug fix: the tally was previously dropped
def run_self_test(cls, sandbox=True, output=True, error_callback=None):
    """Smoke-test this executor by running its built-in echo program.

    Returns True when the echoed output matches and nothing was written to
    stderr; reports any mismatch through `error_callback` when provided.
    """
    if not cls.test_program:
        return True

    if output:
        print(ansi_style("%-39s%s" % ('Self-testing #ansi[%s](|underline):' % cls.get_executor_name(), '')), end=' ')
    try:
        executor = cls(cls.test_name, utf8bytes(cls.test_program))
        if sandbox:
            proc = executor.launch(time=cls.test_time, memory=cls.test_memory)
        else:
            proc = executor.launch_unsafe()

        payload = b'echo: Hello, World!'
        stdout, stderr = proc.communicate(payload + b'\n')
        passed = stdout.strip() == payload and not stderr

        if output:
            # Cache the versions now, so that the handshake packet doesn't take ages to generate
            cls.get_runtime_versions()
            print(ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][passed]))

        if stdout.strip() != payload and error_callback:
            error_callback('Got unexpected stdout output:\n' + utf8text(stdout))
        if stderr:
            if error_callback:
                error_callback('Got unexpected stderr output:\n' + utf8text(stderr))
            else:
                print(stderr, file=sys.stderr)
        if hasattr(proc, 'protection_fault') and proc.protection_fault:
            print_protection_fault(proc.protection_fault)
        return passed
    except Exception:
        if output:
            print(ansi_style('#ansi[Failed](red|bold)'))
            traceback.print_exc()
        if error_callback:
            error_callback(traceback.format_exc())
        return False
def test_all(self):
    """Test every supported problem that has a tests/ directory.

    Returns the total number of failed cases across all problems.
    """
    total_fails = 0
    for problem, _ in get_supported_problems():
        # Skip problems not matching the optional filter regex.
        if self.problem_regex is not None and not self.problem_regex.match(problem):
            continue
        test_dir = os.path.join(get_problem_root(problem), 'tests')
        if not os.path.isdir(test_dir):
            continue
        fails = self.test_problem(problem, test_dir)
        if fails:
            self.output(ansi_style('Problem #ansi[%s](cyan|bold) #ansi[failed %d case(s)](red|bold).')
                        % (problem, fails))
        else:
            self.output(ansi_style('Problem #ansi[%s](cyan|bold) passed with flying colours.') % problem)
        self.output()
        total_fails += fails
    return total_fails
def main():
    """Interactive local-judge CLI entry point (legacy Python 2 snapshot)."""
    global commands
    import logging
    from dmoj import judgeenv, executors

    judgeenv.load_env(cli=True)

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError as ignored:
            pass

    executors.load_executors()

    print 'Running local judge...'

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    judge = LocalJudge()

    for warning in judgeenv.startup_warnings:
        print ansi_style('#ansi[Warning: %s](yellow)' % warning)
    del judgeenv.startup_warnings
    print

    for command in [ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand,
                    RejudgeCommand, HelpCommand, QuitCommand]:
        register(command(judge))

    with judge:
        try:
            judge.listen()
        except:
            traceback.print_exc()
        finally:
            judge.murder()

    # NOTE(review): in this snapshot the command loop runs after judge.murder();
    # a later revision in this file runs it inside the try block — confirm
    # which ordering is intended.
    while True:
        command = raw_input(ansi_style("#ansi[dmoj](magenta)#ansi[>](green) ")).strip()
        line = command.split(' ')
        if line[0] in commands:
            cmd = commands[line[0]]
            try:
                cmd.execute(line[1:])
            except InvalidCommandException:
                print
        else:
            print ansi_style('#ansi[Unrecognized command %s](red|bold)' % line[0])
            print
def main():
    """Drive the judge test suite; exit nonzero when any executor or case fails."""
    logging.basicConfig(level=logging.INFO)

    # Force unbuffered, UTF-8 standard streams.
    sys.stdout = codecs.getwriter('utf-8')(os.fdopen(sys.stdout.fileno(), 'w', 0))
    sys.stderr = codecs.getwriter('utf-8')(os.fdopen(sys.stderr.fileno(), 'w', 0))

    judgeenv.load_env(cli=True, testsuite=True)
    executors.load_executors()

    missing_executor = any(name not in executors.executors for name in required_executors)
    if missing_executor:
        print(ansi_style('#ansi[A required executor failed to load.](red|bold)'))
    else:
        print(ansi_style('#ansi[All required executors loaded successfully.](green|bold)'))
    print()

    failure_count = Tester(judgeenv.problem_regex, judgeenv.case_regex).test_all()
    print()
    print('Test complete')
    if failure_count:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % failure_count)
    else:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))
    raise SystemExit(int(missing_executor or failure_count != 0))
def execute(self, line):
    """Resubmit an earlier submission, optionally overriding its
    problem, language, or limits (legacy Python 2 snapshot)."""
    global submission_id_counter, graded_submissions
    args = self.arg_parser.parse_args(line)
    submission_id_counter += 1
    try:
        id, lang, src, tl, ml = graded_submissions[args.submission_id - 1]
    except IndexError:
        print ansi_style("#ansi[invalid submission '%d'](red|bold)\n" % (args.submission_id - 1))
        return
    # Apply overrides, falling back to the original submission's values.
    id = args.problem or id
    lang = args.language or lang
    tl = args.time_limit or tl
    ml = args.memory_limit or ml
    err = None
    if id not in map(itemgetter(0), judgeenv.get_supported_problems()):
        err = "unknown problem '%s'" % id
    elif lang not in executors:
        err = "unknown language '%s'" % lang
    elif tl <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        err = '--time-limit must be >= 0'
    elif ml <= 0:
        err = '--memory-limit must be >= 0'
    if err:
        print ansi_style('#ansi[%s](red|bold)\n' % err)
        return
    graded_submissions.append((id, lang, src, tl, ml))
    self.judge.begin_grading(submission_id_counter, id, lang, src, tl, ml, False, blocking=True)
def main():
    """Run the judge test suite (with Windows ANSI emulation); exit nonzero on failure."""
    judgeenv.load_env(cli=True, testsuite=True)

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError:
            pass

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')
    executors.load_executors()

    failed = Tester(judgeenv.problem_regex, judgeenv.case_regex).test_all()
    print()
    print('Test complete')
    if not failed:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))
    else:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % failed)
    raise SystemExit(int(failed != 0))
def start(self):
    """Start the problem-directory monitor, if one was configured
    (legacy Python 2 snapshot)."""
    if self._monitor is not None:
        try:
            self._monitor.start()
        except OSError:
            # Monitoring is best-effort: warn but keep the judge running.
            print ansi_style('#ansi[Warning: failed to start problem monitor!](yellow)')
def main(): # pragma: no cover sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) if not sanity_check(): return 1 from dmoj import judgeenv, executors judgeenv.load_env() # Emulate ANSI colors with colorama if os.name == 'nt' and not judgeenv.no_ansi_emu: try: from colorama import init init() except ImportError: pass executors.load_executors() print 'Running live judge...' for warning in judgeenv.startup_warnings: print ansi_style('#ansi[Warning: %s](yellow)' % warning) del judgeenv.startup_warnings if os.name == 'posix' and 'judges' in env: if env.pidfile: with open(env.pidfile) as f: f.write(str(os.getpid())) manager = JudgeManager(env.judges) manager.run() else: return judge_proc(need_monitor=True)
def _custom_invocation(self, language, source, memory_limit, time_limit, input_data):
    """Compile and run `source` once against `input_data` as a custom
    invocation, reporting the result through the packet manager."""
    class InvocationGrader(graders.StandardGrader):
        def check_result(self, case, result):
            # Any non-zero result flag means the invocation failed.
            return not result.result_flag

    # Bug fix: bind the limits under distinct names. A class body cannot see
    # an enclosing function's local for a name it also assigns, so the
    # original `time_limit = time_limit` raised NameError at class creation.
    invocation_time_limit = time_limit
    invocation_memory_limit = memory_limit

    class InvocationProblem(object):
        id = 'CustomInvocation'
        time_limit = invocation_time_limit
        memory_limit = invocation_memory_limit

    class InvocationCase(object):
        config = ConfigNode({'unbuffered': False})
        # The lambda closes over the function argument `input_data`.
        input_data = lambda: input_data

    grader = self.get_grader_from_source(InvocationGrader, InvocationProblem(), language, source)
    binary = grader.binary if grader else None
    if binary:
        self.packet_manager.invocation_begin_packet()
        try:
            result = grader.grade(InvocationCase())
        except TerminateGrading:
            self.packet_manager.submission_terminated_packet()
            print(ansi_style('#ansi[Forcefully terminating invocation.](red|bold)'))
        except:
            self.internal_error()
        else:
            self.packet_manager.invocation_end_packet(result)

    # Bug fix: this previously formatted the builtin `id` function (printing
    # its repr); report the invocation problem id instead.
    print(ansi_style('Done invoking #ansi[%s](green|bold).\n' % (InvocationProblem.id,)))
    self._terminate_grading = False
    self.current_submission_thread = None
    self.current_submission = None
def main():
    """Test-suite entry point; exits with status 1 when any case fails."""
    judgeenv.load_env(cli=True, testsuite=True)

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError:
            pass

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    executors.load_executors()

    suite = Tester(judgeenv.problem_regex, judgeenv.case_regex)
    failed_cases = suite.test_all()
    print()
    print('Test complete')
    if failed_cases:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % failed_cases)
    else:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))
    raise SystemExit(int(failed_cases != 0))
def test_problem(self, problem, test_dir):
    """Run all matching test case directories for `problem`.

    Returns the number of failed cases.
    """
    self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem)
    fails = 0
    dirs = [case for case in os.listdir(test_dir) if self.case_regex is None or self.case_regex.match(case)]
    # Idiom fix: enumerate() instead of indexing via range(len(dirs)).
    for i, case in enumerate(dirs):
        case_dir = os.path.join(test_dir, case)
        if os.path.isdir(case_dir):
            self.output(ansi_style('\tRunning test case #ansi[%s](yellow|bold) for #ansi[%s](cyan|bold)...')
                        % (case, problem))
            try:
                case_fails = self.run_test_case(problem, case, case_dir)
            except Exception:
                # A crash in the harness itself counts as one failure.
                fails += 1
                self.output(ansi_style('\t#ansi[Test case failed with exception:](red|bold)'))
                self.output(traceback.format_exc())
            else:
                self.output(ansi_style('\tResult of case #ansi[%s](yellow|bold) for #ansi[%s](cyan|bold): ')
                            % (case, problem) +
                            ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][not case_fails]))
                fails += case_fails
            # Blank separator line between cases, but not after the last one.
            if i != len(dirs) - 1:
                self.output()
    return fails
def execute(self, line):
    """Resubmit an earlier submission with optional overrides
    (legacy Python 2 snapshot; passes an extra positional flag to begin_grading)."""
    global submission_id_counter, graded_submissions
    args = self.arg_parser.parse_args(line)
    submission_id_counter += 1
    try:
        id, lang, src, tl, ml = graded_submissions[args.submission_id - 1]
    except IndexError:
        print ansi_style("#ansi[invalid submission '%d'](red|bold)\n" % (args.submission_id - 1))
        return
    # Apply overrides, falling back to the original submission's values.
    id = args.problem or id
    lang = args.language or lang
    tl = args.time_limit or tl
    ml = args.memory_limit or ml
    err = None
    if id not in map(itemgetter(0), judgeenv.get_supported_problems()):
        err = "unknown problem '%s'" % id
    elif lang not in executors:
        err = "unknown language '%s'" % lang
    elif tl <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        err = '--time-limit must be >= 0'
    elif ml <= 0:
        err = '--memory-limit must be >= 0'
    if err:
        print ansi_style('#ansi[%s](red|bold)\n' % err)
        return
    graded_submissions.append((id, lang, src, tl, ml))
    self.judge.begin_grading(submission_id_counter, id, lang, src, tl, ml, False, False, blocking=True)
def main():
    """Run the test suite under the 'testsuite' judge id; exit nonzero on failure."""
    logging.basicConfig(level=logging.INFO)

    # Unbuffered, UTF-8 standard streams.
    sys.stdout = codecs.getwriter('utf-8')(os.fdopen(sys.stdout.fileno(), 'w', 0))
    sys.stderr = codecs.getwriter('utf-8')(os.fdopen(sys.stderr.fileno(), 'w', 0))

    judgeenv.load_env(cli=True, testsuite=True)
    judgeenv.env['id'] = 'testsuite'
    executors.load_executors()

    loaded = all(name in executors.executors for name in required_executors)
    executor_fail = not loaded
    if loaded:
        print(ansi_style('#ansi[All required executors loaded successfully.](green|bold)'))
    else:
        print(ansi_style('#ansi[A required executor failed to load.](red|bold)'))
    print()

    fails = Tester(judgeenv.problem_regex, judgeenv.case_regex).test_all()
    print()
    print('Test complete')
    if fails:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails)
    else:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))
    raise SystemExit(int(executor_fail or fails != 0))
def execute(self, line):
    """List available problems in four columns, optionally filtered by a
    regex and limited in count (legacy Python 2 snapshot)."""
    _args = self.arg_parser.parse_args(line)
    if _args.limit is not None and _args.limit <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        print ansi_style("#ansi[--limit must be >= 0](red|bold)\n")
        return
    all_problems = judgeenv.get_supported_problems()
    if _args.filter:
        r = re.compile(_args.filter)
        all_problems = filter(lambda x: r.match(x[0]) is not None, all_problems)
    if _args.limit:
        all_problems = all_problems[:_args.limit]
    if len(all_problems):
        problems = iter(map(itemgetter(0), all_problems))
        # Widest problem name, used for left-justified column alignment.
        max_len = max(len(p[0]) for p in all_problems)
        # Four problems per row; short rows padded with empty strings.
        for row in izip_longest(*[problems] * 4, fillvalue=''):
            print ' '.join(('%*s' % (-max_len, row[i])) for i in xrange(4))
    else:
        print ansi_style("#ansi[No problems matching filter found.](red|bold)")
    print
def execute(self, line):
    """List graded submissions up to an optional --limit
    (legacy Python 2 snapshot)."""
    args = self.arg_parser.parse_args(line)
    for i, data in enumerate(graded_submissions if not args.limit else graded_submissions[:args.limit]):
        problem, lang, src, tl, ml = data
        # NOTE(review): the format places the problem name first and the
        # 1-based index second — confirm that ordering is intended.
        print ansi_style('#ansi[%s](yellow)/#ansi[%s](green) in %s' % (problem, i + 1, lang))
    print
def execute(self, line):
    """List graded submissions, truncated to --limit when given
    (legacy Python 2 snapshot)."""
    args = self.arg_parser.parse_args(line)
    for i, data in enumerate(graded_submissions if not args.limit else graded_submissions[:args.limit]):
        problem, lang, src, tl, ml = data
        print ansi_style('#ansi[%s](yellow)/#ansi[%s](green) in %s' % (problem, i + 1, lang))
    print
def _run_test_case(self, problem, case_dir, config):
    """Grade every source listed in a single test case's config.

    Returns the number of failed runs (0 when the case is skipped).
    """
    # Explicit skip flag in the config wins over everything else.
    if 'skip' in config and config['skip']:
        self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Unsupported on current platform'))
        return 0
    language = config['language']
    if language not in all_executors:
        self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Language not supported'))
        return 0
    time = config['time']
    memory = config['memory']
    # 'source' may be a single filename or a list of filenames.
    if isinstance(config['source'], str):
        with open(os.path.join(case_dir, config['source'])) as f:
            sources = [f.read()]
    else:
        sources = []
        for file in config['source']:
            with open(os.path.join(case_dir, file)) as f:
                sources += [f.read()]
    # Expected verdicts / scores / feedback, both global defaults and per-case.
    codes_all, codes_cases = self.parse_expect(config.get('expect', 'AC'),
                                               config.get('cases', {}),
                                               self.parse_expected_codes)
    score_all, score_cases = self.parse_expect(config.get('score'),
                                               config.get('score_cases', {}),
                                               self.parse_score)
    feedback_all, feedback_cases = self.parse_expect(config.get('feedback'),
                                                     config.get('feedback_cases', {}),
                                                     self.parse_feedback)
    extended_feedback_all, extended_feedback_cases = self.parse_expect(
        config.get('extended_feedback'),
        config.get('extended_feedback_cases', {}),
        self.parse_feedback)

    def output_case(data):
        # Indent per-case grading output under the case heading.
        self.output('\t\t' + data.strip())

    fails = 0
    for source in sources:
        self.sub_id += 1
        # Arm the manager with expectations before each grading run.
        self.manager.set_expected(codes_all, codes_cases, score_all, score_cases,
                                  feedback_all, feedback_cases,
                                  extended_feedback_all, extended_feedback_cases)
        self.judge.begin_grading(
            Submission(self.sub_id, problem, language, source, time, memory, False, {}),
            blocking=True,
            report=output_case,
        )
        fails += self.manager.failed
    return fails
def execute(self, line):
    """Rejudge a previously graded submission unmodified
    (legacy Python 2 snapshot)."""
    global graded_submissions
    args = self.arg_parser.parse_args(line)
    try:
        problem, lang, src, tl, ml = graded_submissions[args.submission_id - 1]
    except IndexError:
        print ansi_style("#ansi[invalid submission '%d'](red|bold)\n" % (args.submission_id - 1))
        return
    # NOTE(review): submission_id_counter is read here without being
    # incremented, unlike submit/resubmit — confirm the rejudged run is
    # meant to reuse the most recent id.
    self.judge.begin_grading(submission_id_counter, problem, lang, src, tl, ml, False, blocking=True)
def start(self):
    """Start the problem monitor and the refresher thread, when configured
    (legacy Python 2 snapshot)."""
    if self._monitor is not None:
        try:
            self._monitor.start()
        except OSError:
            # Best-effort: log and warn, but do not kill the judge.
            logger.exception('Failed to start problem monitor.')
            print ansi_style('#ansi[Warning: failed to start problem monitor!](yellow)')
    if self._refresher is not None:
        self._refresher.start()
def execute(self, line):
    """Submit a new solution: validate problem/language/limits, read the
    source from a file or stdin, and grade it (legacy Python 2 snapshot)."""
    global submission_id_counter, graded_submissions
    args = self.arg_parser.parse_args(line)
    problem_id = args.problem_id
    language_id = args.language_id
    time_limit = args.time_limit
    memory_limit = args.memory_limit
    err = None
    if problem_id not in map(itemgetter(0), judgeenv.get_supported_problems()):
        err = "unknown problem '%s'" % problem_id
    elif language_id not in executors:
        err = "unknown language '%s'" % language_id
    elif time_limit <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        err = '--time-limit must be >= 0'
    elif memory_limit <= 0:
        err = '--memory-limit must be >= 0'
    if not err:
        if args.source_file:
            try:
                with open(os.path.realpath(args.source_file), 'r') as f:
                    src = f.read()
            except Exception as io:
                err = str(io)
        else:
            # Read source interactively until ':q' or EOF (Ctrl+D).
            src = []
            try:
                while True:
                    s = raw_input()
                    if s.strip() == ':q':
                        raise EOFError
                    src.append(s)
            except EOFError:  # Ctrl+D
                src = '\n'.join(src)
            except Exception as io:
                err = str(io)
    if err:
        print ansi_style('#ansi[%s](red|bold)\n' % err)
        return
    submission_id_counter += 1
    graded_submissions.append((problem_id, language_id, src, time_limit, memory_limit))
    self.judge.begin_grading(submission_id_counter, problem_id, language_id, src,
                             time_limit, memory_limit, False, False, blocking=True)
def main():
    """Interactive local-judge REPL (legacy Python 2 snapshot): the command
    loop runs inside the judge's `with` block until EOF or Ctrl+C."""
    global commands
    import logging
    from dmoj import judgeenv, executors

    judgeenv.load_env(cli=True)

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError:
            pass

    executors.load_executors()

    print 'Running local judge...'

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    judge = LocalJudge()

    for warning in judgeenv.startup_warnings:
        print ansi_style('#ansi[Warning: %s](yellow)' % warning)
    del judgeenv.startup_warnings
    print

    for command in [ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand,
                    RejudgeCommand, HelpCommand, QuitCommand]:
        register(command(judge))

    with judge:
        try:
            judge.listen()
            # Read-eval loop: dispatch each entered line to a registered command.
            while True:
                command = raw_input(ansi_style("#ansi[dmoj](magenta)#ansi[>](green) ")).strip()
                line = command.split(' ')
                if line[0] in commands:
                    cmd = commands[line[0]]
                    try:
                        cmd.execute(line[1:])
                    except InvalidCommandException:
                        print
                else:
                    print ansi_style('#ansi[Unrecognized command %s](red|bold)' % line[0])
                    print
        except (EOFError, KeyboardInterrupt):
            print
        finally:
            judge.murder()
def execute(self, line):
    """List graded submissions, optionally truncated to --limit entries
    (legacy Python 2 snapshot)."""
    args = self.arg_parser.parse_args(line)
    if args.limit is not None and args.limit <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        print ansi_style('#ansi[--limit must be >= 0](red|bold)\n')
        return
    for i, data in enumerate(graded_submissions if not args.limit else graded_submissions[:args.limit]):
        problem, lang, src, tl, ml = data
        print ansi_style('#ansi[%s](yellow)/#ansi[%s](green) in %s' % (problem, i + 1, lang))
    print
def main():
    """Test-suite entry point (legacy Python 2 snapshot)."""
    # Unbuffered standard streams.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)

    judgeenv.load_env(cli=True, testsuite=True)
    executors.load_executors()

    executor_fail = not all(name in executors.executors for name in required_executors)
    if executor_fail:
        print ansi_style('#ansi[A required executor failed to load.](red|bold)')
    else:
        print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
    print

    tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
    fails = tester.test_all()

    print
    print 'Test complete'
    if fails:
        print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
    else:
        print ansi_style('#ansi[All cases passed.](green|bold)')
    # Exit status 1 when any executor or case failed.
    raise SystemExit(int(executor_fail or fails != 0))
def execute(self, line):
    """List the judge's graded submissions, up to an optional --limit
    (legacy Python 2 snapshot using InvalidCommandException)."""
    args = self.arg_parser.parse_args(line)
    if args.limit is not None and args.limit <= 0:
        # NOTE(review): 0 is rejected but the message says ">= 0"; should read "> 0".
        raise InvalidCommandException("--limit must be >= 0")
    submissions = self.judge.graded_submissions if not args.limit else self.judge.graded_submissions[:args.limit]
    for i, (problem, lang, src, tl, ml) in enumerate(submissions):
        print ansi_style('#ansi[%s](yellow)/#ansi[%s](green) in %s' % (problem, i + 1, lang))
    print
def _grading_thread_main(self, ipc_ready_signal: threading.Event, submission: Submission, report) -> None:
    """Grading thread body: spawn a JudgeWorker for `submission` and dispatch
    its IPC messages until grading ends.

    `ipc_ready_signal` is set once the worker's HELLO handshake arrives, and
    unconditionally on teardown so the spawning thread can never block forever.
    """
    try:
        report(ansi_style('Start grading #ansi[%s](yellow)/#ansi[%s](green|bold) in %s...'
                          % (submission.problem_id, submission.id, submission.language)))

        self.current_judge_worker = JudgeWorker(submission)

        # Maps each IPC message type to its handler; every handler receives
        # `report` as its first argument.
        ipc_handler_dispatch: Dict[IPC, Callable] = {
            IPC.HELLO: lambda _report: ipc_ready_signal.set(),
            IPC.COMPILE_ERROR: self._ipc_compile_error,
            IPC.COMPILE_MESSAGE: self._ipc_compile_message,
            IPC.GRADING_BEGIN: self._ipc_grading_begin,
            IPC.GRADING_END: self._ipc_grading_end,
            IPC.GRADING_ABORTED: self._ipc_grading_aborted,
            IPC.BATCH_BEGIN: self._ipc_batch_begin,
            IPC.BATCH_END: self._ipc_batch_end,
            IPC.RESULT: self._ipc_result,
            IPC.UNHANDLED_EXCEPTION: self._ipc_unhandled_exception,
        }

        for ipc_type, data in self.current_judge_worker.communicate():
            try:
                handler_func = ipc_handler_dispatch[ipc_type]
            except KeyError:
                # An unknown message type is a protocol violation, not a user error.
                raise RuntimeError("judge got unexpected IPC message from worker: %s"
                                   % ((ipc_type, data),)) from None

            handler_func(report, *data)

        report(ansi_style('Done grading #ansi[%s](yellow)/#ansi[%s](green|bold).\n'
                          % (submission.problem_id, submission.id)))
    except Exception:  # noqa: E722, we want to catch everything
        self.log_internal_error()
    finally:
        if self.current_judge_worker is not None:
            self.current_judge_worker.stop()
        self.current_submission = None
        self.current_judge_worker = None

        # Might not have been set if an exception was encountered before HELLO message, so signal here to keep the
        # other side from waiting forever.
        ipc_ready_signal.set()
def get_grader_from_source(self, grader_class, problem, language, source):
    """Instantiate a grader for `source` (legacy Python 2 snapshot).

    Returns the grader, None on compile error, or delegates to
    internal_error() for any other initialization failure.
    """
    # Graders expect byte strings; encode unicode sources first.
    if isinstance(source, unicode):
        source = source.encode('utf-8')

    try:
        grader = grader_class(self, problem, language, source)
    except CompileError as ce:
        print ansi_style('#ansi[Failed compiling submission!](red|bold)')
        print ce.message,  # don't print extra newline
        grader = None
    except:  # if custom grader failed to initialize, report it to the site
        return self.internal_error()

    return grader
def main(): # pragma: no cover sys.stdout = codecs.getwriter("utf-8")(os.fdopen(sys.stdout.fileno(), 'w', 0)) sys.stderr = codecs.getwriter("utf-8")(sys.stderr) if not sanity_check(): return 1 from dmoj import judgeenv, executors judgeenv.load_env() # Emulate ANSI colors with colorama if os.name == 'nt' and not judgeenv.no_ansi_emu: try: from colorama import init init() except ImportError: pass executors.load_executors() if hasattr(signal, 'SIGUSR2'): signal.signal(signal.SIGUSR2, signal.SIG_IGN) print 'Running live judge...' for warning in judgeenv.startup_warnings: print ansi_style('#ansi[Warning: %s](yellow)' % warning) del judgeenv.startup_warnings if os.name == 'posix' and 'judges' in env: logfile = judgeenv.log_file try: logfile = logfile % 'master' except TypeError: pass logging.basicConfig( filename=logfile, level=logging.INFO, format='%(levelname)s %(asctime)s %(process)d %(name)s %(message)s' ) if env.pidfile: with open(env.pidfile) as f: f.write(str(os.getpid())) manager = JudgeManager(env.judges) manager.run() else: return judge_proc(need_monitor=True)
def grading_cleanup_wrapper():
    """Grade one submission, then unconditionally reset per-submission state."""
    report(ansi_style('Start grading #ansi[%s](yellow)/#ansi[%s](green|bold) in %s...'
                      % (problem_id, id, language)))

    try:
        self._block_and_grade(Problem(problem_id, time_limit, memory_limit), language, source,
                              short_circuit, meta, report=report)
    except Exception:
        self.log_internal_error()

    # Clear per-submission judge state whether grading succeeded or not.
    self._terminate_grading = False
    self.current_submission_id = None
    self.current_submission_thread = None
    self.current_grader = None

    report(ansi_style('Done grading #ansi[%s](yellow)/#ansi[%s](green|bold).\n' % (problem_id, id)))
def run_test_case(self, problem, case, case_dir):
    """Merge the case's config files and grade each listed source.

    Returns the number of failed grading runs (0 when the case is skipped).
    """
    config = {}
    for file in self.case_files:
        try:
            with open(os.path.join(case_dir, file)) as f:
                config.update(yaml.safe_load(f.read()))
        except IOError:
            # Missing fragments are fine; merge whatever config exists.
            pass

    if not config:
        self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - No usable test.yml'))
        return 0
    if 'skip' in config and config['skip']:
        self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Unsupported on current platform'))
        return 0

    language = config['language']
    if language not in all_executors:
        self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Language not supported'))
        return 0

    time = config['time']
    memory = config['memory']

    # 'source' may name a single file or a list of files.
    source_spec = config['source']
    source_files = [source_spec] if isinstance(source_spec, six.string_types) else list(source_spec)
    sources = []
    for name in source_files:
        with open(os.path.join(case_dir, name)) as f:
            sources.append(f.read())

    codes_all, codes_cases = self.parse_expect(config.get('expect', 'AC'), config.get('cases', {}),
                                               self.parse_expected_codes)
    feedback_all, feedback_cases = self.parse_expect(config.get('feedback'), config.get('feedback_cases', {}),
                                                     self.parse_feedback)

    def output_case(data):
        # Indent per-case grading output under the case heading.
        self.output('\t\t' + data.strip())

    fails = 0
    for source in sources:
        self.sub_id += 1
        self.manager.set_expected(codes_all, codes_cases, feedback_all, feedback_cases)
        self.judge.begin_grading(self.sub_id, problem, language, source, time, memory, False, False,
                                 blocking=True, report=output_case)
        fails += self.manager.failed
    return fails
def run_command(line):
    """Dispatch a tokenized command line; return a shell-style exit status."""
    name = line[0]
    if name not in commands:
        print(ansi_style('#ansi[Unrecognized command %s](red|bold)' % name))
        print()
        return 127
    try:
        return commands[name].execute(line[1:])
    except InvalidCommandException as e:
        if e.message:
            print(ansi_style("#ansi[%s](red|bold)\n" % e.message))
        print()
        return 1
def begin_grading(self, submission: Submission, report=logger.info, blocking=False) -> None:
    """Start grading `submission` on a new grading thread.

    When `blocking` is true, waits for the grading thread to finish;
    otherwise returns once the worker's IPC handshake completes.
    """
    # Ensure only one submission is running at a time; this lock is released at the end of submission grading.
    # This is necessary because `begin_grading` is "re-entrant"; after e.g. grading-end is sent, the network
    # thread may receive a new submission before the grading thread and worker from the *previous* submission
    # have finished tearing down. Trashing global state (e.g. `self.current_judge_worker`) before then would be
    # an error.
    self._grading_lock.acquire()
    assert self.current_judge_worker is None

    report(ansi_style('Start grading #ansi[%s](yellow)/#ansi[%s](green|bold) in %s...'
                      % (submission.problem_id, submission.id, submission.language)))

    # FIXME(tbrindus): what if we receive an abort from the judge before IPC handshake completes? We'll send
    # an abort request down the pipe, possibly messing up the handshake.
    self.current_judge_worker = JudgeWorker(submission)

    ipc_ready_signal = threading.Event()
    # Bug fix: `_grading_thread_main` takes (ipc_ready_signal, submission, report);
    # `submission` was missing from `args`, so the thread died with a TypeError.
    grading_thread = threading.Thread(target=self._grading_thread_main,
                                      args=(ipc_ready_signal, submission, report), daemon=True)
    grading_thread.start()
    ipc_ready_signal.wait()

    if blocking:
        grading_thread.join()
def open_editor(self, lang, src=b''):
    """Collect submission source: open $EDITOR pre-filled with `src`, or fall
    back to reading lines from stdin until ':q' or EOF."""
    file_suffix = executors[lang].Executor.ext
    editor = os.environ.get('EDITOR')

    if editor:
        # Round-trip the source through a temp file the editor can modify.
        with tempfile.NamedTemporaryFile(suffix=file_suffix) as temp:
            temp.write(src)
            temp.flush()
            subprocess.call([editor, temp.name])
            temp.seek(0)
            src = temp.read()
        return src

    print(ansi_style('#ansi[$EDITOR not set, falling back to stdin](yellow)\n'))
    src = []
    try:
        while True:
            entry = input()
            if entry.strip() == ':q':
                raise EOFError
            src.append(entry)
    except EOFError:  # Ctrl+D
        src = '\n'.join(src)
    except Exception as io:
        raise InvalidCommandException(str(io))
    return src
def _ipc_result(self, report, batch_number: Optional[int], case_number: int, result: Result) -> None:
    """Log a single test-case result and forward it to the site."""
    codes = result.readable_codes()

    colored_codes = ['#ansi[%s](%s|bold)' % ('--' if x == 'SC' else x, Result.COLORS_BYID[x]) for x in codes]
    colored_aux_codes = '{%s}' % ', '.join(colored_codes[1:]) if len(codes) > 1 else ''
    colored_feedback = '(#ansi[%s](|underline)) ' % utf8text(result.feedback) if result.feedback else ''

    if result.result_flag & Result.SC:
        # Short-circuited cases carry no timing or memory information.
        case_info = ''
    else:
        case_info = '[%.3fs (%.3fs) | %dkb] %s%s' % (result.execution_time, result.wall_clock_time,
                                                     result.max_memory, colored_feedback, colored_aux_codes)

    # Indent cases that belong to a batch.
    case_padding = ' ' if batch_number is not None else ''
    report(ansi_style('%sTest case %2d %-3s %s' % (case_padding, case_number, colored_codes[0], case_info)))

    self.packet_manager.test_case_status_packet(case_number, result)
def cli_main():
    """Entry point for the local-judge command-line interface."""
    import logging
    from dmoj import judgeenv, executors

    judgeenv.load_env(cli=True)
    executors.load_executors()

    print('Running local judge...')

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    judge = LocalJudge()

    for warning in judgeenv.startup_warnings:
        print_ansi('#ansi[Warning: %s](yellow)' % warning)
    del judgeenv.startup_warnings
    print()

    from dmoj.commands import all_commands, commands, register_command
    for command in all_commands:
        register_command(command(judge))

    def run_command(line):
        """Dispatch one tokenized command; return a shell-style exit status."""
        if not line:
            return 127
        name = line[0]
        if name not in commands:
            print_ansi('#ansi[Unrecognized command %s](red|bold)' % name)
            print()
            return 127
        try:
            return commands[name].execute(line[1:])
        except InvalidCommandException as e:
            if e.message:
                print_ansi("#ansi[%s](red|bold)\n" % e.message)
            print()
            return 1

    with judge:
        try:
            judge.listen()
            if judgeenv.cli_command:
                # One-shot mode: run the command given on the judge's own CLI.
                return run_command(judgeenv.cli_command)
            while True:
                entered = input(ansi_style("#ansi[dmoj](magenta)#ansi[>](green) ")).strip()
                run_command(shlex.split(entered))
        except (EOFError, KeyboardInterrupt):
            print()
        finally:
            judge.murder()
def main():
    """Auto-configure every available executor and print the resulting
    runtime configuration as YAML (legacy Python 2 snapshot)."""
    result = {}

    if os.name == 'nt':
        judgeenv.load_env(cli=True)
        # Emulate ANSI colors with colorama, when available.
        if not judgeenv.no_ansi_emu:
            try:
                from colorama import init
                init()
            except ImportError:
                pass

    judgeenv.env['runtime'] = {}

    for name in get_available():
        executor = load_executor(name)
        if executor is None or not hasattr(executor, 'Executor'):
            continue
        if hasattr(executor.Executor, 'autoconfig'):
            # Trailing comma suppresses the newline so the verdict lands on the same line.
            print ansi_style('%-43s%s' % ('Auto-configuring #ansi[%s](|underline):' % name, '')),
            try:
                # autoconfig() returns (config, success, feedback[, errors]).
                data = executor.Executor.autoconfig()
                config = data[0]
                success = data[1]
                feedback = data[2]
                errors = '' if len(data) < 4 else data[3]
            except Exception:
                print ansi_style('#ansi[Not supported](red|bold)')
                traceback.print_exc()
            else:
                print ansi_style(['#ansi[%s](red|bold)', '#ansi[%s](green|bold)'][success] %
                                 (feedback or ['Failed', 'Success'][success]))
                if not success:
                    if config:
                        print ' Attempted:'
                        print '  ', yaml.dump(config, default_flow_style=False).rstrip().replace('\n', '\n' + ' ' * 4)
                    if errors:
                        print ' Errors:'
                        print '  ', errors.replace('\n', '\n' + ' ' * 4)
                if success:
                    result.update(config)

    print
    print ansi_style('#ansi[Configuration result](green|bold|underline):')
    print yaml.dump({'runtime': result}, default_flow_style=False).rstrip()
def test_problem(self, problem, test_dir):
    """Collect matching case directories for `problem`.

    NOTE(review): this snapshot appears truncated — `dirs` is built but never
    used and nothing is returned; compare the complete revision of this
    function elsewhere in this file before relying on it.
    """
    self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem)
    fails = 0
    dirs = [case for case in os.listdir(test_dir) if self.case_regex is None or self.case_regex.match(case)]
def start(self):
    """Start the problem monitor and the refresher thread, when configured."""
    monitor = self._monitor
    if monitor is not None:
        try:
            monitor.start()
        except OSError:
            # Keep the judge running without monitoring: log, warn, continue.
            logger.exception('Failed to start problem monitor.')
            print(ansi_style('#ansi[Warning: failed to start problem monitor!](yellow)'))

    refresher = self._refresher
    if refresher is not None:
        refresher.start()
def main():
    """Live classic-judge entry point (legacy Python 2 snapshot)."""
    # Unbuffered stdout so status lines appear immediately.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)

    if not sanity_check():
        return 1

    import logging
    from dmoj import judgeenv, executors

    judgeenv.load_env()

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError as ignored:
            pass

    executors.load_executors()

    print 'Running live judge...'

    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    judge = ClassicJudge(judgeenv.server_host, judgeenv.server_port)

    for warning in judgeenv.startup_warnings:
        print ansi_style('#ansi[Warning: %s](yellow)' % warning)
    del judgeenv.startup_warnings
    print

    with judge:
        try:
            judge.listen()
        except KeyboardInterrupt:
            pass
        except:
            traceback.print_exc()
        finally:
            judge.murder()
def test_all(self):
    """Run the suite over every supported problem; return the total failure count."""
    total_fails = 0
    for problem, _ in get_supported_problems():
        if self.problem_regex is not None and not self.problem_regex.match(problem):
            continue
        test_dir = os.path.join(get_problem_root(problem), 'tests')
        if os.path.isdir(test_dir):
            fails = self.test_problem(problem, test_dir)
            if fails:
                message = ansi_style('Problem #ansi[%s](cyan|bold) #ansi[failed %d case(s)](red|bold).') \
                          % (problem, fails)
            else:
                message = ansi_style('Problem #ansi[%s](cyan|bold) passed with flying colours.') % problem
            self.output(message)
            self.output()
            total_fails += fails
    return total_fails
def run_self_test(cls, sandbox=True, output=True, error_callback=None):
    """Smoke-test this executor by running its built-in echo program.

    Returns True when the program echoes the payload with empty stderr and
    does not exceed the time or memory limit. Bug fix: the TLE/MLE messages
    previously printed even when output=False and never reached
    `error_callback`, unlike every other report in this function.
    """
    if not cls.test_program:
        return True

    if output:
        print(ansi_style("%-39s%s" % ('Self-testing #ansi[%s](|underline):' % cls.get_executor_name(), '')), end=' ')
    try:
        executor = cls(cls.test_name, utf8bytes(cls.test_program))
        proc = executor.launch(time=cls.test_time, memory=cls.test_memory) if sandbox else executor.launch_unsafe()

        test_message = b'echo: Hello, World!'
        stdout, stderr = proc.communicate(test_message + b'\n')

        if proc.tle:
            if output:
                print(ansi_style('#ansi[Time Limit Exceeded](red|bold)'))
            if error_callback:
                error_callback('Time Limit Exceeded')
            return False
        if proc.mle:
            if output:
                print(ansi_style('#ansi[Memory Limit Exceeded](red|bold)'))
            if error_callback:
                error_callback('Memory Limit Exceeded')
            return False

        res = stdout.strip() == test_message and not stderr
        if output:
            # Cache the versions now, so that the handshake packet doesn't take ages to generate
            cls.get_runtime_versions()
            usage = '[%.3fs, %d KB]' % (proc.execution_time, proc.max_memory)
            print(ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][res]), usage)
        if stdout.strip() != test_message and error_callback:
            error_callback('Got unexpected stdout output:\n' + utf8text(stdout))
        if stderr:
            if error_callback:
                error_callback('Got unexpected stderr output:\n' + utf8text(stderr))
            else:
                print(stderr, file=sys.stderr)
        if hasattr(proc, 'protection_fault') and proc.protection_fault:
            print_protection_fault(proc.protection_fault)
        return res
    except Exception:
        if output:
            print(ansi_style('#ansi[Failed](red|bold)'))
            traceback.print_exc()
        if error_callback:
            error_callback(traceback.format_exc())
        return False
def run_self_test(cls, sandbox=True):
    # Legacy Python 2 variant of the executor self-test: compiles
    # cls.test_program and checks that it echoes a fixed message unchanged.
    # Returns True on success, False on any failure or exception.
    if not cls.test_program:
        return True
    # Trailing comma keeps the cursor on the same line so the result prints
    # next to the label (Python 2 print statement).
    print ansi_style("%-39s%s" % ('Self-testing #ansi[%s](|underline):' % cls.name, '')),
    try:
        executor = cls(cls.test_name, cls.test_program)
        proc = executor.launch(time=cls.test_time, memory=cls.test_memory) if sandbox else executor.launch_unsafe()
        test_message = 'echo: Hello, World!'
        stdout, stderr = proc.communicate(test_message + '\n')
        # Success requires an exact echo and a completely silent stderr.
        res = stdout.strip() == test_message and not stderr
        print ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][res])
        if stderr:
            print>> sys.stderr, stderr
        return res
    except Exception:
        print 'Failed'
        import traceback
        traceback.print_exc()
        return False
def get_grader_from_source(self, grader_class, problem, language, source, report=print):
    """Instantiate *grader_class* for a submission, compiling *source*.

    report: callable used to surface compile errors to the user.

    Returns the grader instance, or None if the submission failed to compile.
    Any other error while initializing the grader (e.g. a broken custom
    judge) is reported to the site via internal_error().
    """
    try:
        grader = grader_class(self, problem, language, utf8bytes(source))
    except CompileError as ce:
        report(ansi_style('#ansi[Failed compiling submission!](red|bold)'))
        report(ce.args[0].rstrip())  # don't print extra newline
        grader = None
    # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # propagate instead of being reported as grading internal errors.
    except Exception:
        # if custom grader failed to initialize, report it to the site
        return self.internal_error()
    return grader
def execute(self, line):
    """List graded submissions, newest-first index, up to --limit entries.

    Raises InvalidCommandException when --limit is zero or negative.
    """
    args = self.arg_parser.parse_args(line)

    if args.limit is not None and args.limit <= 0:
        # FIX: the check rejects 0, so the message must say "> 0" (the old
        # text claimed ">= 0", contradicting the condition).
        raise InvalidCommandException('--limit must be > 0')

    submissions = self.judge.graded_submissions if not args.limit else self.judge.graded_submissions[:args.limit]

    for i, (problem, lang, src, tl, ml) in enumerate(submissions):
        # Display as problem/id, with ids numbered from 1.
        print(ansi_style('#ansi[%s](yellow)/#ansi[%s](green) in %s' % (problem, i + 1, lang)))
    print()
def main():
    # Legacy Python 2 entry point for the live judge.
    # Reopen stdout unbuffered (bufsize=0) so status lines appear immediately.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    if not sanity_check():
        return 1
    import logging
    from dmoj import judgeenv, executors
    judgeenv.load_env()

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError:
            pass

    executors.load_executors()

    print 'Running live judge...'
    logging.basicConfig(filename=judgeenv.log_file, level=logging.INFO,
                        format='%(levelname)s %(asctime)s %(module)s %(message)s')

    judge = ClassicJudge(judgeenv.server_host, judgeenv.server_port)

    for warning in judgeenv.startup_warnings:
        print ansi_style('#ansi[Warning: %s](yellow)' % warning)
    # Free the warning list; it is only needed once at startup.
    del judgeenv.startup_warnings
    print

    # The judge is a context manager: entering connects, murder() tears down.
    with judge:
        try:
            judge.listen()
        except KeyboardInterrupt:
            pass
        except:
            traceback.print_exc()
        finally:
            judge.murder()
def run_self_test(cls, sandbox=True, output=True, error_callback=None):
    # Legacy Python 2 variant of the executor self-test.
    # output:         when False, suppress all console reporting.
    # error_callback: optional callable invoked with diagnostic text on failure.
    if not cls.test_program:
        return True
    if output:
        # Trailing comma keeps the result on the same line as the label
        # (Python 2 print statement).
        print ansi_style("%-39s%s" % ('Self-testing #ansi[%s](|underline):' % cls.__module__.split('.')[-1], '')),
    try:
        executor = cls(cls.test_name, cls.test_program)
        proc = executor.launch(time=cls.test_time, memory=cls.test_memory) if sandbox else executor.launch_unsafe()
        test_message = 'echo: Hello, World!'
        stdout, stderr = proc.communicate(test_message + '\n')
        # Success requires an exact echo and a completely silent stderr.
        res = stdout.strip() == test_message and not stderr
        if output:
            print ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][res])
        if stderr:
            if error_callback:
                error_callback('Got unexpected stderr output:\n' + stderr)
            else:
                print>> sys.stderr, stderr
        return res
    except Exception:
        if output:
            print ansi_style('#ansi[Failed](red|bold)')
            traceback.print_exc()
        if error_callback:
            error_callback(traceback.format_exc())
        return False
def main():
    # Legacy Python 2 auto-configuration tool: probes every available executor
    # and prints a YAML 'runtime' block of the configurations that worked.
    result = {}

    if os.name == 'nt':
        # On Windows the env is loaded here so no_ansi_emu can be consulted.
        # NOTE(review): load_env is only called on 'nt'; presumably the other
        # platforms don't need it for autoconfig — confirm against callers.
        judgeenv.load_env(cli=True)

        # Emulate ANSI colors with colorama
        if not judgeenv.no_ansi_emu:
            try:
                from colorama import init
                init()
            except ImportError:
                pass

    judgeenv.env['runtime'] = {}

    for name in get_available():
        executor = load_executor(name)

        if executor is None or not hasattr(executor, 'Executor'):
            continue

        if hasattr(executor.Executor, 'autoconfig'):
            # Trailing comma keeps the result on the same line (Python 2 print).
            print ansi_style('%-43s%s' % ('Auto-configuring #ansi[%s](|underline):' % name, '')),
            try:
                # autoconfig() returns (config, success, feedback[, errors]).
                data = executor.Executor.autoconfig()
                config = data[0]
                success = data[1]
                feedback = data[2]
                errors = '' if len(data) < 4 else data[3]
            except Exception:
                print ansi_style('#ansi[Not supported](red|bold)')
                traceback.print_exc()
            else:
                print ansi_style(['#ansi[%s](red|bold)', '#ansi[%s](green|bold)'][success] %
                                 (feedback or ['Failed', 'Success'][success]))

                if not success:
                    # Show what was tried and why it failed, indented for readability.
                    if config:
                        print ' Attempted:'
                        print ' ', yaml.dump(config, default_flow_style=False).rstrip().replace('\n', '\n' + ' ' * 4)

                    if errors:
                        print ' Errors:'
                        print ' ', errors.replace('\n', '\n' + ' ' * 4)

                if success:
                    result.update(config)

    print
    print ansi_style('#ansi[Configuration result](green|bold|underline):')
    print yaml.dump({'runtime': result}, default_flow_style=False).rstrip()
def execute(self, line):
    # Legacy Python 2 handler for the `submit` command: validates arguments,
    # reads submission source from stdin (terminated by ':q' or Ctrl+D), and
    # grades it synchronously.
    global submission_id_counter, graded_submissions

    args = self.arg_parser.parse_args(line)

    problem_id = args.problem_id
    language_id = args.language_id
    time_limit = args.time_limit
    memory_limit = args.memory_limit

    # Validate inputs; the first failure wins.
    err = None
    if problem_id not in map(itemgetter(0), judgeenv.get_supported_problems()):
        err = "unknown problem '%s'" % problem_id
    elif language_id not in executors:
        err = "unknown language '%s'" % language_id
    elif time_limit <= 0:
        # NOTE(review): message says ">= 0" but the check rejects 0 — the
        # message presumably should read "> 0"; confirm intent before fixing.
        err = '--time-limit must be >= 0'
    elif memory_limit <= 0:
        # NOTE(review): same ">= 0" vs "> 0" mismatch as above.
        err = '--memory-limit must be >= 0'

    if err:
        print ansi_style('#ansi[%s](red|bold)\n' % err)
        return

    # Collect source lines until ':q' (translated to EOFError) or real EOF.
    src = []
    try:
        while True:
            s = raw_input()
            if s.strip() == ':q':
                raise EOFError
            src.append(s)
    except EOFError:  # Ctrl+D
        src = '\n'.join(src)

    submission_id_counter += 1
    graded_submissions.append((problem_id, language_id, src, time_limit, memory_limit))
    # blocking=True: grade synchronously before returning to the prompt.
    self.judge.begin_grading(submission_id_counter, problem_id, language_id, src, time_limit, memory_limit,
                             False, blocking=True)
def execute(self, line):
    # Legacy Python 2 handler for the `problems` command: prints supported
    # problem IDs in four left-aligned columns, optionally filtered by regex
    # and truncated to --limit entries.
    _args = self.arg_parser.parse_args(line)

    if _args.limit is not None and _args.limit <= 0:
        # NOTE(review): the check rejects 0 but the message says ">= 0" —
        # presumably should read "> 0"; confirm intent.
        print ansi_style("#ansi[--limit must be >= 0](red|bold)\n")
        return

    all_problems = judgeenv.get_supported_problems()

    if _args.filter:
        # Each entry is a tuple whose first element is the problem ID.
        r = re.compile(_args.filter)
        all_problems = filter(lambda x: r.match(x[0]) is not None, all_problems)

    if _args.limit:
        all_problems = all_problems[:_args.limit]

    if len(all_problems):
        problems = iter(map(itemgetter(0), all_problems))
        max_len = max(len(p[0]) for p in all_problems)
        # izip_longest over 4 references to the same iterator chunks the
        # problem list into rows of four; missing cells become ''.
        for row in izip_longest(*[problems] * 4, fillvalue=''):
            # Negative width left-justifies each column to max_len characters.
            print ' '.join(('%*s' % (-max_len, row[i])) for i in xrange(4))
    else:
        print ansi_style("#ansi[No problems matching filter found.](red|bold)")
    print
def main():  # pragma: no cover
    """Entry point for the live judge.

    Configures the environment and executors, then runs either a multi-judge
    master (POSIX with a 'judges' config section) or a single judge process.

    Returns 1 on sanity-check failure; otherwise delegates to the manager or
    judge_proc.
    """
    unicode_stdout_stderr()
    if not sanity_check():
        return 1

    from dmoj import judgeenv, executors

    judgeenv.load_env()

    # Emulate ANSI colors with colorama
    if os.name == 'nt' and not judgeenv.no_ansi_emu:
        try:
            from colorama import init
            init()
        except ImportError:
            pass

    executors.load_executors()

    # SIGUSR2 is used for monitoring elsewhere; ignore it here when available.
    if hasattr(signal, 'SIGUSR2'):
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)

    print('Running live judge...')

    for warning in judgeenv.startup_warnings:
        print(ansi_style('#ansi[Warning: %s](yellow)' % warning))
    del judgeenv.startup_warnings

    if os.name == 'posix' and 'judges' in env:
        logfile = judgeenv.log_file
        try:
            # The master process substitutes its name into the log path.
            logfile = logfile % 'master'
        except TypeError:
            pass  # log_file has no %s placeholder; use it verbatim

        logging.basicConfig(filename=logfile, level=logging.INFO,
                            format='%(levelname)s %(asctime)s %(process)d %(name)s %(message)s')

        if env.pidfile:
            # FIX: the pid file must be opened for WRITING; the previous code
            # used the default read mode and f.write() would raise
            # io.UnsupportedOperation.
            with open(env.pidfile, 'w') as f:
                f.write(str(os.getpid()))

        manager = JudgeManager(env.judges)
        manager.run()
    else:
        return judge_proc(need_monitor=True)
def open_editor(self, lang, src=b''):
    """Obtain submission source, either via $EDITOR or by reading stdin.

    lang: executor key used to pick the temporary file's extension.
    src:  initial buffer contents presented in the editor (bytes).

    Returns the source code. NOTE(review): the editor path returns bytes
    (temp.read()) while the stdin fallback returns str ('\\n'.join) — confirm
    callers normalize before relying on the return type.

    Raises InvalidCommandException if reading from stdin fails with anything
    other than EOF.
    """
    file_suffix = executors[lang].Executor.ext
    editor = os.environ.get('EDITOR')

    if editor:
        # Round-trip through a temp file: seed it, let the editor run,
        # then read back whatever the user saved.
        with tempfile.NamedTemporaryFile(suffix=file_suffix) as temp:
            temp.write(src)
            temp.flush()
            subprocess.call([editor, temp.name])
            temp.seek(0)
            src = temp.read()
    else:
        print(ansi_style('#ansi[$EDITOR not set, falling back to stdin](yellow)\n'))

        # Collect lines until ':q' (translated to EOFError) or real EOF.
        src = []
        try:
            while True:
                s = input()
                if s.strip() == ':q':
                    raise EOFError
                src.append(s)
        except EOFError:  # Ctrl+D
            src = '\n'.join(src)
        except Exception as io:
            raise InvalidCommandException(str(io))

    return src
def ci_test(executors_to_test, overrides):
    # CI driver: auto-configure each requested executor, then run the bundled
    # test suite, and exit the process with 0 on full success, 1 otherwise.
    # overrides maps executor names to manual configs (falsy = unavailable).
    result = {}
    failed = False

    for name in executors_to_test:
        executor = import_module('dmoj.executors.' + name)

        print(ansi_style('%-34s%s' % ('Testing #ansi[%s](|underline):' % name, '')), end=' ')

        if not hasattr(executor, 'Executor'):
            failed = True
            print(ansi_style('#ansi[Does not export](red|bold) #ansi[Executor](red|underline)'))
            continue

        if not hasattr(executor.Executor, 'autoconfig'):
            print(ansi_style('#ansi[Could not autoconfig](magenta|bold)'))
            continue

        try:
            if name in overrides:
                if not overrides[name]:
                    print(ansi_style('#ansi[Environment not found on Travis](red)'))
                    continue
                print(ansi_style('#ansi[(manual config)](yellow)'), end=' ')
                data = executor.Executor.autoconfig_run_test(overrides[name])
            else:
                data = executor.Executor.autoconfig()
            # autoconfig returns (config, success, feedback[, errors]).
            config = data[0]
            success = data[1]
            feedback = data[2]
            errors = '' if len(data) < 4 else data[3]
        except Exception:
            failed = True
            print(ansi_style('#ansi[Autoconfig broken](red|bold)'))
            traceback.print_exc()
        else:
            print(ansi_style(['#ansi[%s](red|bold)', '#ansi[%s](green|bold)'][success] %
                             (feedback or ['Failed', 'Success'][success])))
            if success:
                # Record the working config and register the executor for the
                # test-suite run below.
                result.update(config)
                executor.Executor.runtime_dict = config
                executors[name] = executor
                for runtime, ver in executor.Executor.get_runtime_versions():
                    print(ansi_style(' #ansi[%s](cyan): %s' % (runtime, '.'.join(map(str, ver)) if ver else 'unknown')))
            else:
                # A missing JVM is tolerated on CI rather than failing the build.
                if feedback == 'Could not find JVM':
                    continue

                if config:
                    print(' Attempted:')
                    print(' ', yaml.safe_dump(config, default_flow_style=False).rstrip().replace('\n', '\n' + ' ' * 4))

                if errors:
                    print(' Errors:')
                    print(' ', errors.replace('\n', '\n' + ' ' * 4))
                failed = True

    print()
    print(ansi_style('#ansi[Configuration result](green|bold|underline):'))
    print(yaml.safe_dump({'runtime': result}, default_flow_style=False).rstrip())
    print()
    if failed:
        print(ansi_style('#ansi[Executor configuration failed.](red|bold).'))
    else:
        print(ansi_style('#ansi[Executor configuration succeeded.](green|bold).'))
    print()
    print()
    print('Running test cases...')

    # Point the judge at the repository's testsuite directory and run it.
    judgeenv.problem_dirs = [os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'testsuite'))]
    tester = Tester()
    fails = tester.test_all()

    print()
    print('Test complete')
    if fails:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails)
    else:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))

    failed |= fails != 0
    raise SystemExit(int(failed))
def _begin_grading(self, problem_id, language, source, time_limit, memory_limit, short_circuit, pretests_only,
                   report=print):
    # Grade the current submission end-to-end: load the problem, pick and
    # build a grader, run every test case, and stream result packets to the
    # site. `report` receives human-readable progress lines.
    submission_id = self.current_submission
    report(ansi_style('Start grading #ansi[%s](yellow)/#ansi[%s](green|bold) in %s...'
                      % (problem_id, submission_id, language)))

    try:
        problem = Problem(problem_id, time_limit, memory_limit, load_pretests_only=pretests_only)
    except Exception:
        return self.internal_error()

    # Grader selection is driven by the problem's configuration.
    if 'signature_grader' in problem.config:
        grader_class = graders.SignatureGrader
    elif 'custom_judge' in problem.config:
        grader_class = graders.CustomGrader
    else:
        grader_class = graders.StandardGrader

    grader = self.get_grader_from_source(grader_class, problem, language, source, report=report)
    binary = grader.binary if grader else None

    # the compiler may have failed, or an error could have happened while initializing a custom judge
    # either way, we can't continue
    if binary:
        self.packet_manager.begin_grading_packet(problem.is_pretested)

        batch_counter = 1
        in_batch = False

        # cases are indexed at 1
        case_number = 1
        try:
            for result in self.grade_cases(grader, problem.cases, short_circuit=short_circuit):
                if isinstance(result, BatchBegin):
                    self.packet_manager.batch_begin_packet()
                    report(ansi_style("#ansi[Batch #%d](yellow|bold)" % batch_counter))
                    in_batch = True
                elif isinstance(result, BatchEnd):
                    self.packet_manager.batch_end_packet()
                    batch_counter += 1
                    in_batch = False
                else:
                    codes = result.readable_codes()

                    # here be cancer
                    is_sc = (result.result_flag & Result.SC)
                    # First code is the primary verdict; any extra codes are
                    # shown as a braced auxiliary list.
                    colored_codes = list(map(lambda x: '#ansi[%s](%s|bold)' % ('--' if x == 'SC' else x,
                                                                               Result.COLORS_BYID[x]), codes))
                    colored_aux_codes = '{%s}' % ', '.join(colored_codes[1:]) if len(codes) > 1 else ''
                    colored_feedback = '(#ansi[%s](|underline)) ' % utf8text(result.feedback) if result.feedback else u''
                    # Short-circuited cases show no timing/memory details.
                    case_info = '[%.3fs (%.3fs) | %dkb] %s%s' % (result.execution_time, result.r_execution_time,
                                                                 result.max_memory, colored_feedback,
                                                                 colored_aux_codes) if not is_sc else ''
                    # Cases inside a batch are indented (bool multiplies to 0 or 1).
                    case_padding = ' ' * in_batch
                    report(ansi_style('%sTest case %2d %-3s %s' % (case_padding, case_number, colored_codes[0],
                                                                   case_info)))
                    self.packet_manager.test_case_status_packet(case_number, result)

                    case_number += 1
        except TerminateGrading:
            self.packet_manager.submission_terminated_packet()
            report(ansi_style('#ansi[Forcefully terminating grading. '
                              'Temporary files may not be deleted.](red|bold)'))
            pass
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
            self.internal_error()
        else:
            # Only signal a clean grading end when no exception occurred.
            self.packet_manager.grading_end_packet()

    report(ansi_style('Done grading #ansi[%s](yellow)/#ansi[%s](green|bold).\n' % (problem_id, submission_id)))

    # Reset per-submission state so the judge can accept the next submission.
    self._terminate_grading = False
    self.current_submission_thread = None
    self.current_submission = None
    self.current_grader = None
def error_output(self, message):
    """Print *message* to stdout rendered in red."""
    red_template = ansi_style('#ansi[%s](red)')
    print(red_template % message)
def _begin_grading(self, problem_id, language, source, time_limit, memory_limit, short_circuit):
    # Legacy Python 2 grading loop: load the problem, pick and build a grader,
    # run every test case, and stream result packets to the site.
    submission_id = self.current_submission
    print ansi_style('Start grading #ansi[%s](yellow)/#ansi[%s](green|bold) in %s...'
                     % (problem_id, submission_id, language))

    try:
        problem = Problem(problem_id, time_limit, memory_limit)
    except InvalidInitException:
        return self.internal_error()

    # Grader selection is driven by the problem's configuration.
    if 'signature_grader' in problem.config:
        grader_class = graders.SignatureGrader
    elif 'custom_judge' in problem.config:
        grader_class = graders.CustomGrader
    else:
        grader_class = graders.StandardGrader

    # Graders expect byte strings (Python 2 str).
    if isinstance(source, unicode):
        source = source.encode('utf-8')

    try:
        grader = grader_class(self, problem, language, source)
    except CompileError as ce:
        print ansi_style('#ansi[Failed compiling submission!](red|bold)')
        print ce.message,  # don't print extra newline
        grader = None
    except:  # if custom grader failed to initialize, report it to the site
        return self.internal_error()

    binary = grader.binary if grader else None

    # the compiler may have failed, or an error could have happened while initializing a custom judge
    # either way, we can't continue
    if binary:
        self.packet_manager.begin_grading_packet()

        batch_counter = 1
        in_batch = False
        try:
            for case_number, result in enumerate(self.grade_cases(grader, problem.cases,
                                                                  short_circuit=short_circuit)):
                if isinstance(result, BatchBegin):
                    self.packet_manager.batch_begin_packet()
                    print ansi_style("#ansi[Batch #%d](yellow|bold)" % batch_counter)
                    in_batch = True
                elif isinstance(result, BatchEnd):
                    self.packet_manager.batch_end_packet()
                    batch_counter += 1
                    in_batch = False
                else:
                    codes = result.readable_codes()

                    # here be cancer
                    is_sc = (result.result_flag & Result.SC)
                    # First code is the primary verdict; extra codes become a
                    # braced auxiliary list.
                    colored_codes = map(lambda x: '#ansi[%s](%s|bold)' % ('--' if x == 'SC' else x,
                                                                          Result.COLORS_BYID[x]), codes)
                    colored_aux_codes = '{%s}' % ', '.join(colored_codes[1:]) if len(codes) > 1 else ''
                    colored_feedback = '(#ansi[%s](|underline)) ' % result.feedback if result.feedback else ''
                    # Short-circuited cases show no timing/memory details.
                    case_info = '[%.3fs | %dkb] %s%s' % (result.execution_time, result.max_memory,
                                                         colored_feedback,
                                                         colored_aux_codes) if not is_sc else ''
                    # Cases inside a batch are indented (bool multiplies to 0 or 1).
                    case_padding = ' ' * in_batch
                    print ansi_style('%sTest case %2d %-3s %s' % (case_padding, case_number + 1,
                                                                  colored_codes[0], case_info))

                    # cases are indexed at 1
                    self.packet_manager.test_case_status_packet(
                        case_number + 1, result.points, result.case.points, result.result_flag,
                        result.execution_time,
                        result.max_memory,
                        result.proc_output[:result.case.output_prefix_length].decode('utf-8', 'replace'),
                        result.feedback)
        except TerminateGrading:
            self.packet_manager.submission_terminated_packet()
            print ansi_style('#ansi[Forcefully terminating grading. Temporary files may not be deleted.](red|bold)')
            pass
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
            self.internal_error()
        else:
            # Only signal a clean grading end when no exception occurred.
            self.packet_manager.grading_end_packet()

    print ansi_style('Done grading #ansi[%s](yellow)/#ansi[%s](green|bold).' % (problem_id, submission_id))
    print

    # Reset per-submission state so the judge can accept the next submission.
    self._terminate_grading = False
    self.current_submission_thread = None
    self.current_submission = None
    self.current_grader = None