def report_unexpected_exception(self, out, test, example, exc_info):
    """
    Report that the given example raised an unexpected exception.
    """
    out(self._failure_header(test, example) +
        "Exception raised:\n" + _indent(_exception_traceback(exc_info)))
def report_unexpected_exception(self, out, test, example, exc_info) -> None:
    """Report that the given example raised an unexpected exception by
    displaying the exception as virtual text."""
    # Get the exception traceback.
    traceback = doctest._exception_traceback(exc_info)
    traceback_string = '# ' + traceback.split('\n')[-2].strip()
    # Check whether detailed traceback info is enabled.
    if self.traceback_info:
        for line in [
                line.strip() for line in reversed(traceback.split('\n')[:-2])
        ]:
            # Handle lines differently based on whether they show the
            # code that raised the error, or the place where that code
            # was found.
            if line.startswith('File'):
                path = re.sub(r'(^[^"]*")|("[^"]*$)', '', line)
                # Depending on whether the error came from code in the
                # current file or another, display the line number or
                # the file path.
                if path == self.filepath:
                    lineno = re.sub(
                        r'(^.*line (?=\d))|((?<=\d), in .*$)', '', line)
                    traceback_string += f' on line {lineno}'
                else:
                    # Show the relative path to the file instead of the
                    # absolute path to save space.
                    relpath = os.path.relpath(path)
                    traceback_string += f' in {relpath}'
            else:
                traceback_string += f'; by `{line}`'
    # Display the virtual text.
    self.nvim.api.buf_set_virtual_text(
        0, self.namespace_id, example.lineno + test.lineno + 1,
        [[traceback_string, "Error"]], {})
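# A minimal sketch (an assumption, not taken from the snippet above) of the
# pynvim objects the reporter relies on: `self.nvim` is a pynvim client and
# `self.namespace_id` is a namespace created through the API for the virtual
# text. The socket path below is hypothetical.
import pynvim

nvim = pynvim.attach('socket', path='/tmp/nvim.sock')
namespace_id = nvim.api.create_namespace('doctest-virtualtext')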
def __run(self, test, out):
    """
    Run the examples in `test`.  Write the outcome of each example
    with one of the `DocTestRunner.report_*` methods, using the
    writer function `out`.  Return a tuple `(f, t)`, where `t` is
    the number of examples tried, and `f` is the number of examples
    that failed.
    """
    # Keep track of the number of failures and tries.
    failures = tries = 0

    # Save the option flags (since option directives can be used
    # to modify them).
    original_optionflags = self.optionflags

    SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

    check = self._checker.check_output

    # Process each example.
    for examplenum, example in enumerate(test.examples):

        # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
        # reporting after the first failure.
        quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                 failures > 0)

        # Merge in the example's options.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= ~optionflag

        # If 'SKIP' is set, then skip this example.
        if self.optionflags & SKIP:
            continue

        # Record that we started this example.
        tries += 1
        if not quiet:
            self.report_start(out, test, example)

        # Run the example in the given context, and record any
        # exception that gets raised.  (But don't intercept
        # keyboard interrupts.)
        got = ""
        try:
            # Don't blink!  This is where the user's code gets run.
            src = example.source.strip().replace('"""', r'\"""')
            # Restore `ans`, which was cleared by the separator println.
            self.julia.stdin.write('ans=_ans;'.encode('utf-8'))
            # Run the command, suppressing display if it ends with ';'.
            show = 'true' if src[-1] != ';' else 'false'
            cmd = ('Base.eval_user_input(Base.parse_input_line(raw""" '
                   + src + ' """),' + show + ');')
            self.julia.stdin.write(cmd.encode('utf-8'))
            # Save `ans`, and make sure no more output is generated.
            self.julia.stdin.write('_ans=ans; nothing\n'.encode('utf-8'))
            # Print a separator so we know where the output ends.
            sep = 'fjsdiij3oi123j42'
            self.julia.stdin.write(
                ('println("' + sep + '")\n').encode('utf-8'))
            self.julia.stdin.flush()
            # Read everything up to the separator line.
            got = []
            line = ''
            while line[:-1] != sep:
                got.append(line)
                line = self.julia.stdout.readline().decode(
                    'utf-8').rstrip() + '\n'
            got = ''.join(got).expandtabs()
            exception = None
        except KeyboardInterrupt:
            raise
        except:
            exception = sys.exc_info()

        #got = self._fakeout.getvalue()  # the actual output
        self._fakeout.truncate(0)
        outcome = FAILURE   # guilty until proved innocent or insane

        # If the example executed without raising any exceptions,
        # verify its output.
        if exception is None:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS

        # The example raised an exception: check if it was expected.
        else:
            exc_msg = traceback.format_exception_only(*exception[:2])[-1]
            if not quiet:
                got += _exception_traceback(exception)

            # If `example.exc_msg` is None, then we weren't expecting
            # an exception.
            if example.exc_msg is None:
                outcome = BOOM

            # We expected an exception: see whether it matches.
            elif check(example.exc_msg, exc_msg, self.optionflags):
                outcome = SUCCESS

            # Another chance if they didn't care about the detail.
            elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
                m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
                if m1 and m2 and check(m1.group(1), m2.group(1),
                                       self.optionflags):
                    outcome = SUCCESS

        # Report the outcome.
        if outcome is SUCCESS:
            if not quiet:
                self.report_success(out, test, example, got)
        elif outcome is FAILURE:
            if not quiet:
                self.report_failure(out, test, example, got)
            failures += 1
        elif outcome is BOOM:
            if not quiet:
                self.report_unexpected_exception(out, test, example,
                                                 exception)
            failures += 1
        else:
            assert False, ("unknown outcome", outcome)

    # Restore the option flags (in case they were modified).
    self.optionflags = original_optionflags

    # Record and return the number of failures and tries.
    self.__record_outcome(test, failures, tries)
    return TestResults(failures, tries)
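# A minimal sketch, under assumptions, of how the `self.julia` handle used by
# `__run` above could be created: a Julia subprocess with binary pipes, so the
# runner can write commands to stdin and read output back from stdout until
# the sentinel separator line appears. The command-line flags are assumptions;
# the original source may start Julia differently.
import subprocess

julia = subprocess.Popen(
    ['julia', '--quiet', '--banner=no'],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # fold stderr into the stream the runner reads
)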
def report_unexpected_exception(self, out, test, example, exc_info):
    """
    Report that the given example raised an unexpected exception.
    """
    out(self._failure_header(test, example) +
        'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _DocTestRunner__run(self, test, compileflags, out):
    """
    Run the examples in `test`.  Write the outcome of each example
    with one of the `DocTestRunner.report_*` methods, using the
    writer function `out`.  `compileflags` is the set of compiler
    flags that should be used to execute examples.  Return a tuple
    `(f, t)`, where `t` is the number of examples tried, and `f`
    is the number of examples that failed.  The examples are run
    in the namespace `test.globs`.
    """
    # Keep track of the number of failures and tries.
    failures = tries = 0

    # Save the option flags (since option directives can be used
    # to modify them).
    original_optionflags = self.optionflags

    SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

    check = self._checker.check_output

    # Process each example.
    for examplenum, example in enumerate(test.examples):

        # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
        # reporting after the first failure.
        quiet = (self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE and
                 failures > 0)

        # Merge in the example's options.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= ~optionflag

        # If 'SKIP' is set, then skip this example.
        if self.optionflags & doctest.SKIP:
            continue

        # Record that we started this example.
        tries += 1
        if not quiet:
            self.report_start(out, test, example)

        # Use a special filename for compile(), so we can retrieve
        # the source code during interactive debugging (see
        # __patched_linecache_getlines).
        filename = '<doctest %s[%d]>' % (test.name, examplenum)

        # Run the example in the given context (globs), and record
        # any exception that gets raised.  (But don't intercept
        # keyboard interrupts.)
        try:
            # Don't blink!  This is where the user's code gets run.
            exec(compile(example.source, filename, "exec",
                         compileflags, 1), test.globs)
            self.debugger.set_continue()  # ==== Example Finished ====
            exception = None
        except KeyboardInterrupt:
            raise
        except Exception:
            exception = sys.exc_info()
            self.debugger.set_continue()  # ==== Example Finished ====

        got = self._fakeout.getvalue()  # the actual output
        self._fakeout.truncate(0)
        outcome = FAILURE   # guilty until proved innocent or insane

        # If the example executed without raising any exceptions,
        # verify its output.
        if exception is None:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS

        # The example raised an exception: check if it was expected.
        else:
            exc_msg = traceback.format_exception_only(*exception[:2])[-1]
            if not quiet:
                got += doctest._exception_traceback(exception)

            # If `example.exc_msg` is None, then we weren't expecting
            # an exception.
            if example.exc_msg is None:
                outcome = BOOM

            # We expected an exception: see whether it matches.
            elif check(example.exc_msg, exc_msg, self.optionflags):
                outcome = SUCCESS

            # Another chance if they didn't care about the detail.
            elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
                if check(doctest._strip_exception_details(example.exc_msg),
                         doctest._strip_exception_details(exc_msg),
                         self.optionflags):
                    outcome = SUCCESS

        # Report the outcome.
        if outcome is SUCCESS:
            if not quiet:
                self.report_success(out, test, example, got)
        elif outcome is FAILURE:
            if not quiet:
                self.report_failure(out, test, example, got)
            failures += 1
        elif outcome is BOOM:
            if not quiet:
                self.report_unexpected_exception(out, test, example,
                                                 exception)
            failures += 1
        else:
            assert False, ("unknown outcome", outcome)

        if failures and self.optionflags & doctest.FAIL_FAST:
            break

    # Restore the option flags (in case they were modified).
    self.optionflags = original_optionflags

    # Record and return the number of failures and tries.
    self._DocTestRunner__record_outcome(test, failures, tries)
    return doctest.TestResults(failures, tries)
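# Why the function above is named `_DocTestRunner__run`: inside the class
# body, `DocTestRunner.__run` is name-mangled to `_DocTestRunner__run`, so a
# replacement defined outside the class must use the mangled name. A minimal
# sketch of installing the override by monkey-patching; this is one plausible
# way to wire it up, not necessarily how the original source does it.
import doctest

doctest.DocTestRunner._DocTestRunner__run = _DocTestRunner__run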
def _DocTestRunner__run(self, test, compileflags, out):
    """
    Run the examples in `test`.  Write the outcome of each example
    with one of the `DocTestRunner.report_*` methods, using the
    writer function `out`.  `compileflags` is the set of compiler
    flags that should be used to execute examples.  Return a tuple
    `(f, t)`, where `t` is the number of examples tried, and `f`
    is the number of examples that failed.  The examples are run
    in the namespace `test.globs`.
    """
    # Keep track of the number of failures and tries.
    failures = tries = 0

    # Save the option flags (since option directives can be used
    # to modify them).
    original_optionflags = self.optionflags

    SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

    check = self._checker.check_output

    # Process each example.
    for examplenum, example in enumerate(test.examples):

        # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
        # reporting after the first failure.
        quiet = (self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE and
                 failures > 0)

        # Merge in the example's options.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= ~optionflag

        # Record that we started this example.
        tries += 1
        if not quiet:
            self.report_start(out, test, example)

        # Use a special filename for compile(), so we can retrieve
        # the source code during interactive debugging (see
        # __patched_linecache_getlines).
        filename = '<doctest %s[%d]>' % (test.name, examplenum)

        # Run the example in the given context (globs), and record
        # any exception that gets raised.  (But don't intercept
        # keyboard interrupts.)
        try:
            # Don't blink!  This is where the user's code gets run.
            self.run_example(example.source, filename, compileflags,
                             test.globs)
            self.debugger.set_continue()  # ==== Example Finished ====
            exception = None
        except KeyboardInterrupt:
            raise
        except:
            exception = sys.exc_info()
            self.debugger.set_continue()  # ==== Example Finished ====

        got = self._fakeout.getvalue()  # the actual output
        self._fakeout.truncate(0)
        outcome = FAILURE   # guilty until proved innocent or insane

        # If the example executed without raising any exceptions,
        # verify its output.
        if exception is None:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS

        # The example raised an exception: check if it was expected.
        else:
            # Use the info captured in the except clause above;
            # sys.exc_info() is already cleared at this point in Python 3.
            exc_info = exception
            exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
            if not quiet:
                got += doctest._exception_traceback(exc_info)

            # If `example.exc_msg` is None, then we weren't expecting
            # an exception.
            if example.exc_msg is None:
                outcome = BOOM

            # We expected an exception: see whether it matches.
            elif check(example.exc_msg, exc_msg, self.optionflags):
                outcome = SUCCESS

            # Another chance if they didn't care about the detail.
            elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
                m1 = re.match(r'[^:]*:', example.exc_msg)
                m2 = re.match(r'[^:]*:', exc_msg)
                if m1 and m2 and check(m1.group(0), m2.group(0),
                                       self.optionflags):
                    outcome = SUCCESS

        # Report the outcome.
        if outcome is SUCCESS:
            if not quiet:
                self.report_success(out, test, example, got)
        elif outcome is FAILURE:
            if not quiet:
                self.report_failure(out, test, example, got)
            failures += 1
        elif outcome is BOOM:
            if not quiet:
                self.report_unexpected_exception(out, test, example,
                                                 exc_info)
            failures += 1
        else:
            assert False, ("unknown outcome", outcome)

    # Restore the option flags (in case they were modified).
    self.optionflags = original_optionflags

    # Record and return the number of failures and tries.
    self._DocTestRunner__record_outcome(test, failures, tries)
    return failures, tries
def __run(self, test, out):
    """
    Run the examples in `test`.  Write the outcome of each example
    with one of the `DocTestRunner.report_*` methods, using the
    writer function `out`.  Return a tuple `(f, t)`, where `t` is
    the number of examples tried, and `f` is the number of examples
    that failed.
    """
    # Keep track of the number of failures and tries.
    failures = tries = 0

    # Save the option flags (since option directives can be used
    # to modify them).
    original_optionflags = self.optionflags

    SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

    check = self._checker.check_output

    # Process each example.
    for examplenum, example in enumerate(test.examples):

        # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
        # reporting after the first failure.
        quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                 failures > 0)

        # Merge in the example's options.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= ~optionflag

        # If 'SKIP' is set, then skip this example.
        if self.optionflags & SKIP:
            continue

        # Record that we started this example.
        tries += 1
        if not quiet:
            self.report_start(out, test, example)

        # Run the example in the given context, and record any
        # exception that gets raised.  (But don't intercept
        # keyboard interrupts.)
        got = ""
        try:
            # Don't blink!  This is where the user's code gets run.
            src = example.source.strip().replace('"""', r'\"""')
            # Restore `ans`, which was cleared by the separator println.
            self.julia.stdin.write('ans=_ans;'.encode('utf-8'))
            # Run the command, suppressing display if it ends with ';'.
            show = 'true' if src[-1] != ';' else 'false'
            cmd = ('Base.eval_user_input(Base.parse_input_line(raw""" '
                   + src + ' """),' + show + ');')
            self.julia.stdin.write(cmd.encode('utf-8'))
            # Save `ans`, and make sure no more output is generated.
            self.julia.stdin.write('_ans=ans; nothing\n'.encode('utf-8'))
            # Print a separator so we know where the output ends.
            sep = 'fjsdiij3oi123j42'
            self.julia.stdin.write(
                ('println("\\n' + sep + '")\n').encode('utf-8'))
            self.julia.stdin.flush()
            # Read everything up to the separator line.
            got = []
            line = ''
            while line[:-1] != sep:
                got.append(line)
                line = self.julia.stdout.readline().decode(
                    'utf-8').rstrip() + '\n'
            got = ''.join(got).expandtabs()[:-1]
            exception = None
        except KeyboardInterrupt:
            raise
        except:
            exception = sys.exc_info()

        #got = self._fakeout.getvalue()  # the actual output
        self._fakeout.truncate(0)
        outcome = FAILURE   # guilty until proved innocent or insane

        # If the example executed without raising any exceptions,
        # verify its output.
        if exception is None:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS

        # The example raised an exception: check if it was expected.
        else:
            exc_msg = traceback.format_exception_only(*exception[:2])[-1]
            if not quiet:
                got += _exception_traceback(exception)

            # If `example.exc_msg` is None, then we weren't expecting
            # an exception.
            if example.exc_msg is None:
                outcome = BOOM

            # We expected an exception: see whether it matches.
            elif check(example.exc_msg, exc_msg, self.optionflags):
                outcome = SUCCESS

            # Another chance if they didn't care about the detail.
            elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
                m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
                if m1 and m2 and check(m1.group(1), m2.group(1),
                                       self.optionflags):
                    outcome = SUCCESS

        # Report the outcome.
        if outcome is SUCCESS:
            if not quiet:
                self.report_success(out, test, example, got)
        elif outcome is FAILURE:
            if not quiet:
                self.report_failure(out, test, example, got)
            failures += 1
        elif outcome is BOOM:
            if not quiet:
                self.report_unexpected_exception(out, test, example,
                                                 exception)
            failures += 1
        else:
            assert False, ("unknown outcome", outcome)

    # Restore the option flags (in case they were modified).
    self.optionflags = original_optionflags

    # Record and return the number of failures and tries.
    self.__record_outcome(test, failures, tries)
    return TestResults(failures, tries)
def report_unexpected_exception(self, out, test, example, exc_info):
    """Report that the given example raised an unexpected exception."""
    header = 'Exception raised in {}'
    backtrace = doctest._indent(doctest._exception_traceback(exc_info))
    err = CheckstyleRunner.error_node(test, example, header, backtrace)
    self.checkstyle_errors.append(err)
def update_event(self, inp=-1):
    self.set_output_val(0, doctest._exception_traceback(self.input(0)))