def run(self):
    """Runs the suites associated with this doctest.

    RETURNS:
    dict; counts of case outcomes, in the form
    {'passed': int, 'failed': int, 'locked': int}. The doctest either
    passes entirely ('passed': 1) or fails ('failed': 1); doctests are
    never locked. (The previous docstring incorrectly said a bool was
    returned.)
    """
    # Suppress printing until we know whether the doctest passed; the
    # captured log is replayed below only on failure or in verbose mode.
    output.off()
    log_id = output.new_log()

    format.print_line('-')
    print('Doctests for {}'.format(self.name))
    print()

    if not self.docstring:
        # Nothing to test: an absent docstring counts as a failure.
        print('-- No doctests found for {} --'.format(self.name))
        success = False
    else:
        success = self.case.run()
        if success:
            print('-- OK! --')

    output.on()
    output_log = output.get_log(log_id)
    output.remove_log(log_id)

    if not success or self.verbose:
        print(''.join(output_log))
    if not success and self.interactive:
        # Drop the student into an interactive console to debug.
        self.console.interact()

    if success:
        return {'passed': 1, 'failed': 0, 'locked': 0}
    else:
        return {'passed': 0, 'failed': 1, 'locked': 0}
def _run_case(self, test_name, suite_number, case, case_number):
    """A wrapper for case.run().

    Prints informative output and also captures output of the test case
    and returns it as a log. The output is printed only if the case
    fails, or if self.verbose is True.
    """
    # Hold all printing until the case's pass/fail status is known.
    output.off()
    capture_id = output.new_log()

    format.print_line('-')
    print('{} > Suite {} > Case {}'.format(test_name, suite_number, case_number))
    print()

    passed = case.run()
    if passed:
        print('-- OK! --')

    output.on()
    case_log = output.get_log(capture_id)
    output.remove_log(capture_id)

    # Replay the captured log on failure, or always in verbose mode.
    if self.verbose or not passed:
        print(''.join(case_log))
    if not passed:
        short_name = self.test.get_short_name()
        # TODO: Change when in notebook mode
        print('Run only this test case with '
              '"python3 ok -q {} --suite {} --case {}"'.format(
                  short_name, suite_number, case_number))
    return passed
def _evaluate(self, code, frame=None):
    # Evaluates `code` in `frame` (defaulting to self._frame), capturing
    # anything printed. Returns (result, printed_output) on success;
    # raises PythonConsoleException on recursion, timeout, or any other
    # error, after printing a concise diagnostic.
    if frame is None:
        frame = self._frame
    log_id = output.new_log()
    try:
        try:
            result = timer.timed(self.timeout, eval, (code, frame))
        except SyntaxError:
            # `code` is a statement, not an expression: exec it instead,
            # with no result value.
            timer.timed(self.timeout, exec, (code, frame))
            result = None
    except RuntimeError as e:
        # Show only the last few frames of the (typically very deep)
        # recursion traceback.
        stacktrace_length = 9
        stacktrace = traceback.format_exc().split('\n')
        print('Traceback (most recent call last):\n ...')
        print('\n'.join(stacktrace[-stacktrace_length:-1]))
        print('# Error: maximum recursion depth exceeded.')
        raise PythonConsoleException(e)
    except exceptions.Timeout as e:
        print('# Error: evaluation exceeded {} seconds.'.format(e.timeout))
        raise PythonConsoleException(e)
    except Exception as e:
        # Trim the autograder's own frames: keep only what follows the
        # last '<module>' marker in the formatted traceback.
        stacktrace = traceback.format_exc()
        token = '<module>\n'
        index = stacktrace.rfind(token) + len(token)
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise PythonConsoleException(e)
    else:
        printed_output = ''.join(output.get_log(log_id))
        return result, printed_output
    finally:
        # Always unregister the capture log, even when raising.
        output.remove_log(log_id)
def evaluate(self, code):
    # Evaluates Python `code` in self._frame, capturing printed output.
    # Returns (result, printed_output); raises
    # interpreter.ConsoleException on any error, after printing a
    # concise diagnostic.
    log_id = output.new_log()
    try:
        try:
            result = timer.timed(self.timeout, eval, (code, self._frame))
        except SyntaxError:
            # `code` is a statement, not an expression: exec it instead.
            timer.timed(self.timeout, exec, (code, self._frame))
            result = None
    except RuntimeError as e:
        # Show only the tail of the (typically very deep) traceback.
        stacktrace_length = 15
        stacktrace = traceback.format_exc().strip().split('\n')
        print('Traceback (most recent call last):\n ...')
        print('\n'.join(stacktrace[-stacktrace_length:]))
        raise interpreter.ConsoleException(e)
    except exceptions.Timeout as e:
        print('# Error: evaluation exceeded {} seconds.'.format(e.timeout))
        raise interpreter.ConsoleException(e)
    except Exception as e:
        # Trim autograder frames: keep only what follows the line
        # containing the last '<string>' marker (presumably the frame
        # for the eval'd student code -- TODO confirm).
        stacktrace = traceback.format_exc()
        token = '<string>'
        token_start = stacktrace.rfind(token)
        index = stacktrace.find('\n', token_start) + 1
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise interpreter.ConsoleException(e)
    else:
        printed_output = ''.join(output.get_log(log_id))
        return result, printed_output
    finally:
        # Always unregister the capture log, even when raising.
        output.remove_log(log_id)
def evaluate(self, code):
    # Reads and evaluates one line of Scheme `code` in self._frame via
    # the loaded logic interpreter, capturing printed output. Returns
    # (result, printed_output); raises interpreter.ConsoleException on
    # any error.
    if not code.strip():
        # logic.scheme_read can't handle empty strings.
        return None, ''
    log_id = output.new_log()
    try:
        exp = self.logic.read_line(code)
        result = timer.timed(self.timeout, self.logic.scheme_eval,
                             (exp, self._frame))
    except RuntimeError as e:
        # Show only the tail of the (typically very deep) traceback.
        stacktrace_length = 15
        stacktrace = traceback.format_exc().strip().split('\n')
        print('Traceback (most recent call last):\n ...')
        print('\n'.join(stacktrace[-stacktrace_length:]))
        raise interpreter.ConsoleException(e)
    except exceptions.Timeout as e:
        print('# Error: evaluation exceeded {} seconds.'.format(e.timeout))
        raise interpreter.ConsoleException(e)
    except Exception as e:
        # Trim autograder frames: keep only what follows the last
        # '<module>' marker in the formatted traceback.
        stacktrace = traceback.format_exc()
        token = '<module>\n'
        index = stacktrace.rfind(token) + len(token)
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise interpreter.ConsoleException(e)
    else:
        printed_output = ''.join(output.get_log(log_id))
        return result, printed_output
    finally:
        # Always unregister the capture log, even when raising.
        output.remove_log(log_id)
def evaluate(self, code):
    # Reads and evaluates one line of Scheme `code` in self._frame via
    # the loaded scheme interpreter, capturing printed output. Returns
    # (result, printed_output); raises interpreter.ConsoleException on
    # any error (tagged 'SchemeError' for interpreter-level errors).
    if not code.strip():
        # scheme.scheme_read can't handle empty strings.
        return None, ''
    log_id = output.new_log()
    try:
        exp = self.scheme.read_line(code)
        result = timer.timed(self.timeout, self.scheme.scheme_eval,
                             (exp, self._frame))
    except RuntimeError as e:
        # Show only the tail of the (typically very deep) traceback.
        stacktrace_length = 15
        stacktrace = traceback.format_exc().strip().split('\n')
        print('Traceback (most recent call last):\n ...')
        print('\n'.join(stacktrace[-stacktrace_length:]))
        raise interpreter.ConsoleException(e)
    except exceptions.Timeout as e:
        print('# Error: evaluation exceeded {} seconds.'.format(e.timeout))
        raise interpreter.ConsoleException(e)
    except self.scheme.SchemeError as e:
        # Errors raised by the Scheme interpreter itself get a short
        # message rather than a Python traceback.
        print('# Error: {}'.format(e))
        raise interpreter.ConsoleException(e, exception_type='SchemeError')
    except Exception as e:
        # Trim autograder frames: keep only what follows the last
        # '<module>' marker in the formatted traceback.
        stacktrace = traceback.format_exc()
        token = '<module>\n'
        index = stacktrace.rfind(token) + len(token)
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise interpreter.ConsoleException(e)
    else:
        printed_output = ''.join(output.get_log(log_id))
        return result, printed_output
    finally:
        # Always unregister the capture log, even when raising.
        output.remove_log(log_id)
def evaluate(self, code):
    # Evaluates Python `code` in self._frame, capturing printed output.
    # Returns (result, printed_output) with debug markers stripped from
    # the output; raises interpreter.ConsoleException on any error.
    log_id = output.new_log()
    try:
        try:
            result = timer.timed(self.timeout, eval, (code, self._frame))
        except SyntaxError:
            # `code` is a statement, not an expression: exec it instead.
            timer.timed(self.timeout, exec, (code, self._frame))
            result = None
    except RuntimeError as e:
        # Show only the tail of the (typically very deep) traceback.
        stacktrace_length = 15
        stacktrace = traceback.format_exc().strip().split('\n')
        print('Traceback (most recent call last):\n ...')
        print('\n'.join(stacktrace[-stacktrace_length:]))
        raise interpreter.ConsoleException(e)
    except exceptions.Timeout as e:
        print(
            '# Error: evaluation exceeded {} seconds - check for infinite loops'
            .format(e.timeout))
        raise interpreter.ConsoleException(e)
    except Exception as e:
        # Trim autograder frames: keep only what follows the line
        # containing the last '<string>' marker (presumably the frame
        # for the eval'd student code -- TODO confirm).
        stacktrace = traceback.format_exc()
        token = '<string>'
        token_start = stacktrace.rfind(token)
        index = stacktrace.find('\n', token_start) + 1
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise interpreter.ConsoleException(e)
    else:
        printed_output = ''.join(output.get_log(log_id))
        return result, debug.remove_debug(printed_output)
    finally:
        # Always unregister the capture log, even when raising.
        output.remove_log(log_id)
def run(self):
    """Runs the suites associated with this doctest.

    RETURNS:
    dict; counts of case outcomes, in the form
    {'passed': int, 'failed': int, 'locked': int}. The doctest either
    passes entirely ('passed': 1) or fails ('failed': 1); doctests are
    never locked. (The previous docstring incorrectly said a bool was
    returned.)
    """
    # Suppress printing until we know whether the doctest passed; the
    # captured log is replayed below only on failure or in verbose mode.
    output.off()
    log_id = output.new_log()

    format.print_line('-')
    print('Doctests for {}'.format(self.name))
    print()

    success = self.case.run()
    if success:
        print('-- OK! --')

    output.on()
    output_log = output.get_log(log_id)
    output.remove_log(log_id)

    if not success or self.verbose:
        print(''.join(output_log))
    if not success and self.interactive:
        # Drop the student into an interactive console to debug.
        self.console.interact()

    if success:
        return {'passed': 1, 'failed': 0, 'locked': 0}
    else:
        return {'passed': 0, 'failed': 1, 'locked': 0}
def evaluate(self, code):
    """Parses `code` with the Lark parser, capturing printed output.

    RETURNS:
    (result, printed_output); result is the normalized pretty-printed
    parse tree (None for empty input), printed_output is the captured
    output with debug markers stripped. Errors are translated to
    interpreter.ConsoleException by self._lark_execution_guard.
    """
    if not code:
        return None, ''
    log_id = output.new_log()
    try:
        with self._lark_execution_guard():
            result = timer.timed(self.timeout, self._parser.parse,
                                 [], dict(text=code))
            printed_output = ''.join(output.get_log(log_id))
            return self.normalize(result.pretty()), debug.remove_debug(printed_output)
    finally:
        # BUG FIX: the log was registered but never removed, leaking a
        # registered log on every call (every sibling evaluate() pairs
        # new_log with remove_log; the guard removes only its own log).
        output.remove_log(log_id)
def grade(self, tests):
    """Grades `tests` in response to an external request.

    Returns a dict of grading results whose 'output' entry holds
    everything printed while grading ran.
    """
    results = {}
    print("Starting grading from external request")

    # Capture everything the grader prints so it can be returned too.
    capture_id = output.new_log()
    grade(tests, results, verbose=self.args.verbose)
    captured = output.get_log(capture_id)
    output.remove_log(capture_id)

    results['output'] = ''.join(captured)
    return results
def testRegisterLog_oneLog_outputOff(self):
    # A registered log must capture prints even while stdout is off.
    output.off()
    log_id = output.new_log()

    print(self.MESSAGE1)
    print(self.MESSAGE2)

    captured = output.get_log(log_id)
    output.remove_log(log_id)

    expected = [self.MESSAGE1, "\n", self.MESSAGE2, "\n"]
    self.assertEqual(expected, captured)
def testRegisterLog_manyLogs_outputOff(self):
    # Two concurrently registered logs should each capture every print,
    # and removing one must not affect what the other records afterwards.
    output.off()
    first_id = output.new_log()
    second_id = output.new_log()

    print(self.MESSAGE1)
    first_log = output.get_log(first_id)
    second_log = output.get_log(second_id)
    output.remove_log(first_id)
    self.assertEqual([self.MESSAGE1, "\n"], first_log)
    self.assertEqual([self.MESSAGE1, "\n"], second_log)

    # Only the second log is still registered; it alone grows.
    print(self.MESSAGE2)
    second_log = output.get_log(second_id)
    output.remove_log(second_id)
    self.assertEqual([self.MESSAGE1, "\n"], first_log)
    self.assertEqual([self.MESSAGE1, "\n", self.MESSAGE2, "\n"], second_log)
class Suite(core.Serializable):
    """A suite of test cases; subclasses implement how cases are run."""

    type = core.String()
    scored = core.Boolean(default=True)
    cases = core.List()

    def __init__(self, verbose, interactive, timeout=None, **fields):
        super().__init__(**fields)
        self.verbose = verbose
        self.interactive = interactive
        self.timeout = timeout

    def run(self, test_name, suite_number):
        """Subclasses should override this method to run tests.

        PARAMETERS:
        test_name    -- str; name of the parent test.
        suite_number -- int; suite number, assumed to be 1-indexed.

        RETURNS:
        dict; results of the following form:
        {
            'passed': int,
            'failed': int,
            'locked': int,
        }
        """
        raise NotImplementedError

    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test
        case and returns it as a log. The output is suppressed -- it is
        up to the calling function to decide whether or not to print the
        log.
        """
        # Hold all printing until the case's pass/fail status is known.
        output.off()
        capture_id = output.new_log()

        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number, case_number))
        print()

        passed = case.run()
        if passed:
            print('-- OK! --')

        output.on()
        case_log = output.get_log(capture_id)
        output.remove_log(capture_id)
        return passed, case_log
def run(self, env):
    """Runs the suites associated with this doctest.

    NOTE: env is intended only for use with the programmatic API to
    support Python OK tests. It is not used here.

    RETURNS:
    dict; counts of case outcomes, in the form
    {'passed': int, 'failed': int, 'locked': int}. The doctest either
    passes entirely ('passed': 1) or fails ('failed': 1); doctests are
    never locked. (The previous docstring incorrectly said a bool was
    returned.)
    """
    # Suppress printing until we know whether the doctest passed; the
    # captured log is replayed below only on failure or in verbose mode.
    output.off()
    log_id = output.new_log()

    format.print_line('-')
    print('Doctests for {}'.format(self.name))
    print()

    if not self.docstring:
        print('-- No doctests found for {} --'.format(self.name))
        # An absent docstring is a failure unless this test is
        # configured to tolerate empty docstrings.
        if self.ignore_empty:
            success = True
        else:
            success = False
    else:
        success = self.case.run()
        if success:
            print('-- OK! --')

    output.on()
    output_log = output.get_log(log_id)
    output.remove_log(log_id)

    if not success or self.verbose:
        print(''.join(output_log))
    if not success and self.interactive:
        # Drop the student into an interactive console to debug.
        self.console.interact()

    if success:
        return {'passed': 1, 'failed': 0, 'locked': 0}
    else:
        return {'passed': 0, 'failed': 1, 'locked': 0}
def _lark_execution_guard(self):
    # Generator-based context manager (presumably decorated with
    # @contextmanager at the definition site, outside this view -- TODO
    # confirm). While the caller's body runs at the yield point, any
    # error is printed concisely and re-raised as
    # interpreter.ConsoleException.
    log_id = output.new_log()
    try:
        yield
    except exceptions.Timeout as e:
        print('# Error: evaluation exceeded {} seconds.'.format(e.timeout))
        raise interpreter.ConsoleException(e)
    except (LarkError, UnexpectedEOF) as e:
        # Parse errors from Lark get a short one-line message.
        print('# Error: {}'.format(e))
        raise interpreter.ConsoleException(e)
    except Exception as e:
        # Trim autograder frames: keep only what follows the last
        # '<module>' marker in the formatted traceback.
        stacktrace = traceback.format_exc()
        token = '<module>\n'
        index = stacktrace.rfind(token) + len(token)
        stacktrace = stacktrace[index:].rstrip('\n')
        if '\n' in stacktrace:
            print('Traceback (most recent call last):')
        print(stacktrace)
        raise interpreter.ConsoleException(e)
    finally:
        # Unregister this guard's own capture log in every case.
        output.remove_log(log_id)
def __iter__(self):
    # Yields self.lines one at a time, collecting expectation
    # annotations (lines matching EXPECT_PATTERN) and pairing each with
    # the output printed since the previous expectation. Raises EOFError
    # once the lines are exhausted.
    log_id = output.new_log()
    # NOTE(review): output_log is fetched once, before the loop, and
    # then joined repeatedly inside it -- this appears to rely on
    # output.get_log returning a live list that grows as printing
    # happens; confirm against the output module before refactoring.
    output_log = output.get_log(log_id)
    for line in self.lines:
        self.line_number += 1
        match = self.EXPECT_PATTERN.match(line)
        if match:
            # Several expected values may share one annotation,
            # separated by ';'.
            expected = match.group(1).split(';')
            for exp in expected:
                self.expected_output.append((exp.strip(), self.line_number))
            # Split output based on newlines.
            output_lines = ''.join(output_log).split('\n')
            if len(output_lines) > self.last_out_len:
                # New output appeared: take the most recent
                # len(expected) complete lines (the trailing element
                # after the final newline is excluded).
                self.output.extend(output_lines[-1-len(expected):-1])
            else:
                # Nothing new was printed; record empty actual output.
                self.output.extend([''] * len(expected))
            self.last_out_len = len(output_lines)
        yield line
    output.remove_log(log_id)
    # Signal end of input to the consumer (console input protocol).
    raise EOFError
def __iter__(self):
    # Yields self.lines one at a time, collecting expectation
    # annotations (lines matching EXPECT_PATTERN) and pairing each with
    # the output printed since the previous expectation. Raises EOFError
    # once the lines are exhausted.
    log_id = output.new_log()
    # NOTE(review): output_log is fetched once, before the loop, and
    # then joined repeatedly inside it -- this appears to rely on
    # output.get_log returning a live list that grows as printing
    # happens; confirm against the output module before refactoring.
    output_log = output.get_log(log_id)
    for line in self.lines:
        self.line_number += 1
        match = self.EXPECT_PATTERN.match(line)
        if match:
            # Several expected values may share one annotation,
            # separated by ';'.
            expected = match.group(1).split(';')
            for exp in expected:
                self.expected_output.append(
                    (exp.strip(), self.line_number))
            # Split output based on newlines.
            output_lines = ''.join(output_log).split('\n')
            if len(output_lines) > self.last_out_len:
                # New output appeared: take the most recent
                # len(expected) complete lines (the trailing element
                # after the final newline is excluded).
                self.output.extend(output_lines[-1 - len(expected):-1])
            else:
                # Nothing new was printed; record empty actual output.
                self.output.extend([''] * len(expected))
            self.last_out_len = len(output_lines)
        yield line
    output.remove_log(log_id)
    # Signal end of input to the consumer (console input protocol).
    raise EOFError
def _run_case(self, test_name, suite_number, case, case_number):
    """A wrapper for case.run().

    Prints informative output and also captures output of the test case
    and returns it as a log. The output is suppressed -- it is up to the
    calling function to decide whether or not to print the log.
    """
    # Hold all printing until the case's pass/fail status is known.
    output.off()
    capture_id = output.new_log()

    format.print_line('-')
    banner = '{} > Suite {} > Case {}'.format(
        test_name, suite_number, case_number)
    print(banner)
    print()

    passed = case.run()
    if passed:
        print('-- OK! --')

    output.on()
    case_log = output.get_log(capture_id)
    output.remove_log(capture_id)
    return passed, case_log
class Suite(core.Serializable):
    # Serializable schema for a suite of test cases.
    type = core.String()
    scored = core.Boolean(default=True)
    cases = core.List()

    def __init__(self, test, verbose, interactive, timeout=None, **fields):
        super().__init__(**fields)
        self.test = test                # parent test object
        self.verbose = verbose          # print output even for passing cases
        self.interactive = interactive  # interactive debugging on failure
        self.timeout = timeout          # per-case time limit, or None
        self.run_only = []              # 1-indexed case numbers to restrict to

    def run(self, test_name, suite_number, env=None):
        """Subclasses should override this method to run tests.

        PARAMETERS:
        test_name    -- str; name of the parent test.
        suite_number -- int; suite number, assumed to be 1-indexed.
        env          -- dict; used by programmatic API to provide a
                        custom environment to run tests with.

        RETURNS:
        dict; results of the following form:
        {
            'passed': int,
            'failed': int,
            'locked': int,
        }
        """
        raise NotImplementedError

    def enumerate_cases(self):
        # Returns (index, case) pairs for the cases to run. If run_only
        # is nonempty, restricts to those 1-indexed case numbers.
        enumerated = enumerate(self.cases)
        if self.run_only:
            return [x for x in enumerated if x[0] + 1 in self.run_only]
        return enumerated

    def _run_case(self, test_name, suite_number, case, case_number):
        """A wrapper for case.run().

        Prints informative output and also captures output of the test
        case and returns it as a log. The output is printed only if the
        case fails, or if self.verbose is True.
        """
        output.off()  # Delay printing until case status is determined.
        log_id = output.new_log()

        format.print_line('-')
        print('{} > Suite {} > Case {}'.format(test_name, suite_number, case_number))
        print()

        success = case.run()
        if success:
            print('-- OK! --')

        output.on()
        output_log = output.get_log(log_id)
        output.remove_log(log_id)

        # Replay captured output on failure, or always in verbose mode.
        if not success or self.verbose:
            print(''.join(output_log))
        if not success:
            short_name = self.test.get_short_name()
            # TODO: Change when in notebook mode
            print('Run only this test case with '
                  '"python3 ok -q {} --suite {} --case {}"'.format(
                      short_name, suite_number, case_number))
        return success