Example 1
    def report_start(self, out, test, example):
        if 1 <= self._verbosity <= 2:
            src = example.source.split('\n')[0]
            if len(src) > 60: src = src[:57]+'...'
            if isinstance(src, unicode): src = src.encode('utf8')
            lineno = test.lineno + example.lineno + 1
            if self._verbosity == 1:
                if self._stderr_term.CLEAR_LINE:
                    sys.__stderr__.write(self._stderr_term.CLEAR_LINE)
                else:
                    sys.__stderr__.write('\n')
            sys.__stderr__.write('%s  [Line %s] %s%s' %
                                 (self._stderr_term.BOLD, lineno,
                                  self._stderr_term.NORMAL, src))
            if self._verbosity == 2:
                sys.__stderr__.write('\n')

        else:
            DocTestRunner.report_start(self, out, test, example)
        sys.__stdout__.flush()
        self._current_test = (test, example)

        # Total hack warning: This munges the original source to
        # catch any keyboard interrupts, and turn them into special
        # ValueError interrupts.
        example.original_source = example.source
        if self._kbinterrupt_continue:
            example.source = ('try:\n%sexcept KeyboardInterrupt:\n    '
                              'raise ValueError("KEYBOARD-INTERRUPT")\n' %
                              doctest._indent(example.source))
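To make the munging above concrete, here is a minimal sketch (using doctest's private _indent helper, which prefixes each line with four spaces) of the wrapped source for a one-line example; a Ctrl-C during the example then surfaces as a ValueError that the reporting code can recognize:

import doctest

source = 'print(1 + 1)\n'
munged = ('try:\n%sexcept KeyboardInterrupt:\n    '
          'raise ValueError("KEYBOARD-INTERRUPT")\n' % doctest._indent(source))
print(munged)
# try:
#     print(1 + 1)
# except KeyboardInterrupt:
#     raise ValueError("KEYBOARD-INTERRUPT")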
Example 2
 def DTC_runTest(self):
     test = self._dt_test
     old = sys.stdout
     new = StringIO()
     optionflags = self._dt_optionflags
     if not (optionflags & REPORTING_FLAGS):
         # The option flags don't include any reporting flags,
         # so add the default reporting flags
         optionflags |= _unittest_reportflags
     # Patching doctestcase to enable verbose mode
     global g_doctest_verbose
     runner = DocTestRunner(optionflags=optionflags,
                            checker=self._dt_checker,
                            verbose=g_doctest_verbose)
     # End of patch
     try:
         runner.DIVIDER = "-" * 70
         failures, tries = runner.run(test,
                                      out=new.write,
                                      clear_globs=False)
     finally:
         sys.stdout = old
     if failures:
         raise self.failureException(self.format_failure(new.getvalue()))
     elif g_doctest_verbose:
         print new.getvalue()
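The reporting-flag check above relies on doctest.REPORTING_FLAGS being a bitmask of the REPORT_* options (plus FAIL_FAST on Python 3.4+); a quick sketch of the test it performs:

import doctest

flags = doctest.REPORT_NDIFF | doctest.ELLIPSIS
assert flags & doctest.REPORTING_FLAGS        # caller picked a reporting style
flags = doctest.ELLIPSIS
assert not (flags & doctest.REPORTING_FLAGS)  # none picked: defaults get OR-ed in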
Example 3
def t():
    if sys.version_info[0] == 2:
        # This test is disabled in Python 2. There are too many subtle
        # differences in the syntax ('str' has to be renamed 'unicode', a 'u'
        # prefix is needed in front of string literals, etc.); it's too hacky
        # to preserve compatibility.
        #
        # In any case this test isn't meant to verify that the library works
        # in Python 2; it's to check that the README is up to date with the
        # code. So it doesn't matter.
        #
        return

    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rb') as file_in:
        doctest_str = '\n\n'.join(
            re.findall(
                r'```python\s+(.+?)```',
                file_in.read().decode('UTF-8'),
                flags=re.S,
            ), )
    assert doctest_str
    parser = DocTestParser()
    runner = DocTestRunner()
    runner.run(
        parser.get_doctest(
            doctest_str,
            dict(globals(), json=json, pickle=pickle),
            'README.md',
            'README.md',
            0,
        ), )
    assert runner.failures == 0
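For reference, a self-contained sketch of what the regex above pulls out of a README: the contents of each fenced python block, without the fences:

import re

readme = """Intro text.

```python
>>> 1 + 1
2
```
"""
blocks = re.findall(r'```python\s+(.+?)```', readme, flags=re.S)
assert blocks == ['>>> 1 + 1\n2\n']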
Example 4
 def DTC_runTest(self):
     test = self._dt_test
     old = sys.stdout
     new = StringIO()
     optionflags = self._dt_optionflags
     if not (optionflags & REPORTING_FLAGS):
         # The option flags don't include any reporting flags,
         # so add the default reporting flags
         optionflags |= _unittest_reportflags
     # Patching doctestcase to enable verbose mode
     global g_doctest_verbose
     runner = DocTestRunner(optionflags=optionflags,
                            checker=self._dt_checker,
                            verbose=g_doctest_verbose)
     # End of patch
     try:
         runner.DIVIDER = "-"*70
         failures, tries = runner.run(
             test, out=new.write, clear_globs=False)
     finally:
         sys.stdout = old
     if failures:
         raise self.failureException(self.format_failure(new.getvalue()))
     elif g_doctest_verbose:
         print new.getvalue()
Example 5
    def report_start(self, out, test, example):
        if 1 <= self._verbosity <= 2:
            src = example.source.split('\n')[0]
            if len(src) > 60: src = src[:57]+'...'
            lineno = test.lineno + example.lineno + 1
            if self._verbosity == 1:
                if self._stderr_term.CLEAR_LINE:
                    sys.__stderr__.write(self._stderr_term.CLEAR_LINE)
                else:
                    sys.__stderr__.write('\n')
            sys.__stderr__.write('%s  [Line %s] %s%s' %
                                 (self._stderr_term.BOLD, lineno,
                                  self._stderr_term.NORMAL, src))
            if self._verbosity == 2:
                sys.__stderr__.write('\n')
            
        else:
            DocTestRunner.report_start(self, out, test, example)
        sys.__stdout__.flush()
        self._current_test = (test, example)

        # Total hack warning: This munges the original source to
        # catch any keyboard interrupts, and turn them into special
        # ValueError interrupts.
        example.original_source = example.source
        if self._kbinterrupt_continue:
            example.source = ('try:\n%sexcept KeyboardInterrupt:\n    '
                              'raise ValueError("KEYBOARD-INTERRUPT")\n' %
                              doctest._indent(example.source))
Example 6
 def __init__(self, checker=None, verbosity=1, optionflags=0,
              kbinterrupt_continue=False):
     DocTestRunner.__init__(self, checker, (verbosity>2), optionflags)
     self._verbosity = verbosity
     self._current_test = None
     self._term = TerminalController()
     self._stderr_term = TerminalController(sys.__stderr__)
     self._kbinterrupt_continue = kbinterrupt_continue
Example 7
 def __init__(self, optionflags, encoding):
     optionflags |= _unittest_reportflags
     BaseDocTestRunner.__init__(
         self,
         checker=OutputChecker(encoding),
         verbose=False,
         optionflags=optionflags,
     )
Example 8
def run_doctests(module, examples):
    from doctest import DocTest, DocTestRunner, ELLIPSIS

    dt = DocTest(examples, module.__dict__, module.__file__, None, None, None)
    dtr = DocTestRunner(optionflags=ELLIPSIS)

    dtr.run(dt, clear_globs=False)

    return dtr
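The call site of run_doctests isn't shown; a plausible usage sketch (mymodule is a hypothetical stand-in for a module whose docstring holds the examples):

from doctest import DocTestParser
import mymodule  # hypothetical module with doctests in its docstring

# parse() yields prose strings interleaved with Example objects; keep the latter
examples = [piece for piece in DocTestParser().parse(mymodule.__doc__ or '')
            if not isinstance(piece, str)]
runner = run_doctests(mymodule, examples)
print(runner.failures, runner.tries)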
Example 9
    def run_examples(self):
        from doctest import DocTest, DocTestRunner
        examples = sum([part.examples for part in self.parts if isinstance(part, DocTestPart)], [])
        dt = DocTest(examples, self.module.__dict__, self.filename, None, None, None)
        dtr = DocTestRunner()

        def tmp_out(message_to_throw_away):
            # TODO capture error messages, warn
            return

        dtr.run(dt, out=tmp_out, clear_globs=False)
Example 10
def test_readme_examples():
    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rt', encoding='UTF-8') as file_in:
        all_blocks = re.findall(r'```(\w+)\s+(.+?)```',
                                file_in.read(),
                                flags=re.S)
    with TemporaryDirectory() as temp_dir:
        chdir(temp_dir)
        for syntax, block in all_blocks:
            if syntax == 'console':
                command_match = re.search(r'^\$ (\w+) (.+)\s+', block)
                if not command_match:
                    raise ValueError(block)
                print(command_match.group().rstrip())
                command, args = command_match.groups()
                block = block[command_match.end():]

                if command == 'cat':
                    # save the sample file to an actual file
                    file_name = args
                    with open(path.join(temp_dir, file_name),
                              'wt',
                              encoding='UTF-8') as file_out:
                        file_out.write(block)

                else:
                    # check that the command output is as expected
                    actual_output = check_output(
                        '%s %s' % (command, args),
                        shell=True,
                        cwd=temp_dir,
                        encoding='UTF-8',
                        env={
                            **environ,
                            # `canif --help` reads this, and it can vary in the CI environment, so make it fixed
                            'COLUMNS':
                            '71',
                        },
                    )
                    print(actual_output)
                    assert actual_output == block

            elif syntax == 'python':
                parser = DocTestParser()
                test = parser.get_doctest(block, {'canif': canif}, 'README.md',
                                          'README.md', 0)
                runner = DocTestRunner()
                runner.run(test)
                assert not runner.failures
Example 11
    def run_examples(self):
        from doctest import DocTest, DocTestRunner
        examples = sum([
            part.examples
            for part in self.parts if isinstance(part, DocTestPart)
        ], [])
        dt = DocTest(examples, self.module.__dict__, self.filename, None, None,
                     None)
        dtr = DocTestRunner()

        def tmp_out(message_to_throw_away):
            # TODO capture error messages, warn
            return

        dtr.run(dt, out=tmp_out, clear_globs=False)
Example 12
 def test(self):
     # Make a new runner per function to be tested
     runner = DocTestRunner(verbose=d2u.verbose)
     for the_test in d2u.finder.find(func, func.__name__):
         runner.run(the_test)
     failed = count_failures(runner)
     if failed:
         # Since we only looked at a single function's docstring,
         # failed should contain at most one item.  More than that
         # is a case we can't handle and should error out on
         if len(failed) > 1:
             err = "Invalid number of test results: %s" % failed
             raise ValueError(err)
         # Report a normal failure.
         self.fail('failed doctests: %s' % str(failed[0]))
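count_failures comes from the d2u helper module and isn't shown; given how its result is used (len() is checked and the first element is reported), it plausibly returns a list with one entry per docstring that had failures. A sketch consistent with that usage, not the original:

def count_failures(runner):
    # DocTestRunner tallies per-name results in the private _name2ft
    # mapping, {name: (failures, tries)} (present in CPython's doctest
    # through at least 3.12).  Keep only the entries that failed.
    return [(name, fails) for name, (fails, tries)
            in runner._name2ft.items() if fails > 0]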
Example 13
 def test(self):
     # Make a new runner per function to be tested
     runner = DocTestRunner(verbose=d2u.verbose)
     for the_test in d2u.finder.find(func, func.__name__):
         runner.run(the_test)
     failed = count_failures(runner)
     if failed:
         # Since we only looked at a single function's docstring,
         # failed should contain at most one item.  More than that
         # is a case we can't handle and should error out on
         if len(failed) > 1:
             err = "Invalid number of test results:" % failed
             raise ValueError(err)
         # Report a normal failure.
         self.fail('failed doctests: %s' % str(failed[0]))
Example 14
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        save_stderr = sys.stderr
        sys.stderr = _SpoofOut()

        if self._verbosity > 0:
            print(self._stderr_term.CYAN + self._stderr_term.BOLD +
                  'Testing %s...' % test.name + self._stderr_term.NORMAL,
                  file=save_stderr)
        # Guard against a NameError below if the run is interrupted
        # before DocTestRunner.run() returns.
        fails = tries = 0
        try:
            fails, tries = DocTestRunner.run(self, test, compileflags,
                                             out, clear_globs)
        except KeyboardInterrupt:
            if self._current_test is None: raise

            print(self._failure_header(*self._current_test), file=save_stderr)
            print(self._stderr_term.RED + self._stderr_term.BOLD +
                  'Keyboard Interrupt!' + self._stderr_term.NORMAL,
                  file=save_stderr)
        if self._verbosity == 1:
            save_stderr.write(self._stderr_term.CLEAR_LINE)
        if self._verbosity > 0:
            if fails:
                print(self._stderr_term.RED + self._stderr_term.BOLD +
                      '  %d example(s) failed!' % fails + self._stderr_term.NORMAL,
                      file=save_stderr)
            else:
                print(self._stderr_term.GREEN + self._stderr_term.BOLD +
                      '  All examples passed' + self._stderr_term.NORMAL,
                      file=save_stderr)
        print(file=save_stderr)
        sys.stderr = save_stderr
Example 15
def teststring(s, name, globs=None, verbose=None, report=True, 
               optionflags=0, extraglobs=None, raise_on_error=False, 
               parser=doctest.DocTestParser()):

    from doctest import DebugRunner, DocTestRunner

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    test = parser.get_doctest(s, globs, name, name, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Update doctest's module-level master runner; assigning to a name
    # imported with `from doctest import master` would only rebind a local.
    if doctest.master is None:
        doctest.master = runner
    else:
        doctest.master.merge(runner)

    return runner.failures, runner.tries
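A minimal assumed call of teststring, running a doctest held in a plain string:

sample = '''
>>> 2 + 2
4
'''
failures, tries = teststring(sample, name='sample')
assert (failures, tries) == (0, 1)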
Example 16
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        save_stderr = sys.stderr
        sys.stderr = _SpoofOut()
        
        if self._verbosity > 0:
            print >>save_stderr, (
                self._stderr_term.CYAN+self._stderr_term.BOLD+
                'Testing %s...'%test.name+self._stderr_term.NORMAL)
        # Guard against a NameError below if the run is interrupted
        # before DocTestRunner.run() returns.
        fails = tries = 0
        try:
            fails, tries = DocTestRunner.run(self, test, compileflags,
                                             out, clear_globs)
        except KeyboardInterrupt:
            if self._current_test is None: raise

            print >>save_stderr, self._failure_header(*self._current_test)
            print >>save_stderr, (
                self._stderr_term.RED+self._stderr_term.BOLD+
                'Keyboard Interrupt!'+self._stderr_term.NORMAL)
        if self._verbosity == 1:
            save_stderr.write(self._stderr_term.CLEAR_LINE)
        if self._verbosity > 0:
            if fails:
                print >>save_stderr, (
                    self._stderr_term.RED+self._stderr_term.BOLD+
                    '  %d example(s) failed!'%fails+self._stderr_term.NORMAL)
            else:
                print >>save_stderr, (
                    self._stderr_term.GREEN+self._stderr_term.BOLD+
                    '  All examples passed'+self._stderr_term.NORMAL)
        print >>save_stderr
        sys.stderr = save_stderr
Example 17
def ic_testmod(m,
               name=None,
               globs=None,
               verbose=None,
               report=True,
               optionflags=0,
               extraglobs=None,
               raise_on_error=False,
               exclude_empty=False):
    """See original code in doctest.testmod."""
    if name is None:
        name = m.__name__
    finder = DocTestFinder(exclude_empty=exclude_empty)
    if raise_on_error:
        runner = DebugRunner(checker=Py23DocChecker(),
                             verbose=verbose,
                             optionflags=optionflags)
    else:
        runner = DocTestRunner(checker=Py23DocChecker(),
                               verbose=verbose,
                               optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    return doctest.TestResults(runner.failures, runner.tries)
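Assumed usage of ic_testmod, mirroring doctest.testmod (mymodule is hypothetical):

import doctest
import mymodule  # hypothetical module under test

results = ic_testmod(mymodule, verbose=False, optionflags=doctest.ELLIPSIS)
assert results.failed == 0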
Example 18
def _test_docstr(docstr, verbose=True, optionflags=0, raise_on_error=True):
    parser = DocTestParser()
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    test = parser.get_doctest(docstr, {}, __name__, __file__, 0)
    runner.run(test)
Example 19
 def __init__(self, args, assignment):
     super().__init__(args, assignment)
     # The environment in which the doctests are run (global vars)
     self.good_env = {}
     self.verb = self.args.verbose
      # Initialize the doctest module objects that will do the testing/parsing
     self.parser = DocTestParser()
     self.runner = DocTestRunner(verbose=self.verb, optionflags=FAIL_FAST)
     self.lines_exec = 0
     self.lines_total = 0
Example 20
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        '''Run the update runner'''
        self._new_want = {}
        (f, t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)

        # Update the test's docstring, and the lineno's of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0

        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] ==
                        old_lines[lineno][example.indent:]), \
                        'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' ' * example.indent + '... ' +
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([
                    ' ' * example.indent + l for l in new_want[:-1].split('\n')
                ])
                new_lines.append(new_want)
            # Update the example's want & lieno fields
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]

        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)

        # Return failures & tries
        return (f, t)
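To ground the bookkeeping above: example.lineno is the docstring line where the source starts, so the expected output begins example.source.count('\n') lines later. A small sketch:

from doctest import DocTestParser

docstring = '''
Text before.

>>> 1 + 1
3
'''
example = DocTestParser().get_examples(docstring)[0]
got_start = example.lineno + example.source.count('\n')
assert docstring.split('\n')[got_start] == '3'  # the (stale) expected output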
Example 21
def check_doctest(func_name, module, run=True):  # func_name is the function's name, module is the module object
    """Check that MODULE.FUNC_NAME doctest passes."""
    func = getattr(module, func_name)  # so here it finally is: Java-style reflection, basically!
    tests = DocTestFinder().find(func)
    if not tests:
        print("No doctests found for " + func_name)
        return True
    fn = lambda: DocTestRunner().run(tests[0])  # an anonymous function
    result = test_eval(fn, tuple())
    if result.failed != 0:
        print("A doctest example failed for " + func_name + ".")
        return True
    return False
Example 22
def check_doctest(func_name, module, run=True):
    """Check that MODULE.FUNC_NAME doctest passes."""
    func = getattr(module, func_name)
    tests = DocTestFinder().find(func)
    if not tests:
        print("No doctests found for " + func_name)
        return True
    fn = lambda: DocTestRunner().run(tests[0])
    result = test_eval(fn, tuple())
    if result.failed != 0:
        print("A doctest example failed for " + func_name + ".")
        return True
    return False
Example 23
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        '''Run the update runner'''
        self._new_want = {}
        (f,t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)

        # Update the test's docstring, and the lineno's of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0

        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] == 
                        old_lines[lineno][example.indent:]), \
                        'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' '*example.indent + '... ' + 
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([' '*example.indent+l
                                      for l in new_want[:-1].split('\n')])
                new_lines.append(new_want)
            # Update the example's want & lieno fields
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]

        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)

        # Return failures & tries
        return (f,t)
Example 24
def _import_docstring(documenter):
    code_content = _import_docstring_code_content(documenter)
    if code_content:
        # noinspection PyBroadException
        try:
            code, content = code_content
            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)

            glob = {}
            if documenter.modname:
                exec('from %s import *\n' % documenter.modname, glob)

            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)

            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            pass
Example 25
def _import_docstring(documenter):
    if getattr(documenter.directive, 'content', None):
        # noinspection PyBroadException
        try:
            import textwrap

            content = documenter.directive.content

            def get_code(source, c=''):
                s = "\n%s" % c
                return textwrap.dedent(s.join(map(str, source)))

            is_doctest = contains_doctest(get_code(content))
            offset = documenter.directive.content_offset
            if is_doctest:
                parent, parent_offset = get_grandfather_content(content)
                parent = parent[:offset + len(content) - parent_offset]
                code = get_code(parent)
            else:
                code = get_code(content, '>>> ')

            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)

            glob = {}
            exec('import %s as mdl\n' % documenter.modname, glob)
            glob = glob['mdl'].__dict__
            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)

            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            return False
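contains_doctest and get_grandfather_content are helpers defined elsewhere; the former plausibly just looks for an interpreter prompt, along these lines (a guess, not the original):

import re

def contains_doctest(text):
    # Treat any line starting with a '>>> ' prompt as doctest material.
    return bool(re.search(r'^\s*>>> ', text, re.MULTILINE))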
Example 26
class Debugger:
    # Just using this for reporting:
    runner = DocTestRunner()

    def __init__(self, checker=None, set_trace=None):
        if checker is None:
            checker = OutputChecker()
        self.checker = checker
        if set_trace is None:
            set_trace = pdb.Pdb().set_trace
        self.set_trace = set_trace

    def _check_output(self, example):
        want = example.want
        optionflags = self._get_optionflags(example)
        got = sys.stdout.getvalue()
        sys.stdout.truncate(0)
        if not self.checker.check_output(want, got, optionflags):
            self.runner.report_failure(self.save_stdout.write, self.test,
                                       example, got)
            return False
        else:
            return True

    def _check_exception(self, example):
        want_exc_msg = example.exc_msg
        optionflags = self._get_optionflags(example)
        exc_info = sys.exc_info()
        got_exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
        if not self.checker.check_output(want_exc_msg, got_exc_msg,
                                         optionflags):
            got = _exception_traceback(exc_info)
            self.runner.report_failure(self.save_stdout.write, self.test,
                                       example, got)
            return False
        else:
            return True

    def _print_if_not_none(self, *args):
        if args == (None, ):
            pass
        elif len(args) == 1:
            print `args[0]`
        else:
            print `args`  # not quite right: >>> 1,

    def _comment_line(self, line):
        "Return a commented form of the given line"
        line = line.rstrip()
        if line:
            return '# ' + line
        else:
            return '#'

    def _script_from_examples(self, s):
        output = []
        examplenum = 0
        for piece in MyDocTestParser().parse(s):
            if isinstance(piece, Example):
                self._script_from_example(piece, examplenum, output)
                examplenum += 1
            else:
                # Add non-example text.
                output += [
                    self._comment_line(l) for l in piece.split('\n')[:-1]
                ]
        # Combine the output, and return it.
        return '\n'.join(output)

    _CHK_OUT = 'if not CHECK_OUTPUT(__examples__[%d]): __set_trace__()'
    _CHK_EXC = 'if not CHECK_EXCEPTION(__examples__[%d]): __set_trace__()'

    def _script_from_example(self, example, i, output):
        source = self._simulate_compile_singlemode(example.source)[:-1]

        if example.exc_msg is None:
            output.append(source)
            output.append(self._CHK_OUT % i)
        else:
            output.append('try:')
            output.append(_indent(source))
            output.append('    ' + self._CHK_OUT % i)
            output.append('except:')
            output.append('    ' + self._CHK_EXC % i)

    def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos) + 1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow, scol), (erow, ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or tok == ':')
                    and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output)

    def _is_expr(self, stmt):
        stmt = [t for t in stmt if t]
        if not stmt:
            return False

        # An assignment signifies a non-expression, *unless* it
        # appears inside of parens (eg, ``f(x=1)``.)
        parenlevel = 0
        for tok in stmt:
            if tok in '([{': parenlevel += 1
            if tok in '}])': parenlevel -= 1
            if (parenlevel == 0
                    and tok in ('=', '+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                '^=', '<<=', '>>=', '**=', '//=')):
                return False

        # Any keywords *except* "not", "or", "and", "lambda", "in", "is"
        # signifies a non-expression.
        if stmt[0] in ("assert", "break", "class", "continue", "def", "del",
                       "elif", "else", "except", "exec", "finally", "for",
                       "from", "global", "if", "import", "pass", "print",
                       "raise", "return", "try", "while", "yield"):
            return False
        return True

    def _get_optionflags(self, example):
        optionflags = 0
        for (flag, val) in example.options.items():
            if val:
                optionflags |= flag
            else:
                optionflags &= ~flag
        return optionflags

    def debug(self, test, pm=False):
        self.test = test

        # Save the old stdout
        self.save_stdout = sys.stdout

        # Convert the source docstring to a script.
        script = self._script_from_examples(test.docstring)

        # Create a debugger.
        debugger = _OutputRedirectingPdb(sys.stdout)

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        save_set_trace = pdb.set_trace
        pdb.set_trace = debugger.set_trace

        # Write the script to a temporary file.  Note that
        # tempfile.NamedTemporaryFile() cannot be used.  As the docs
        # say, a file so created cannot be opened by name a second
        # time on modern Windows boxes, and execfile() needs to open
        # it.
        srcfilename = tempfile.mktemp(".py", "doctestdebug_")
        f = open(srcfilename, 'w')
        f.write(script)
        f.close()

        # Set up the globals
        test.globs['CHECK_OUTPUT'] = self._check_output
        test.globs['CHECK_EXCEPTION'] = self._check_exception
        test.globs['__print__'] = self._print_if_not_none
        test.globs['__set_trace__'] = debugger.set_trace
        test.globs['__examples__'] = self.test.examples
        try:
            if pm is False:
                debugger.run("execfile(%r)" % srcfilename, test.globs,
                             test.globs)
            else:
                try:
                    sys.stdout = _SpoofOut()
                    try:
                        execfile(srcfilename, test.globs)
                    except bdb.BdbQuit:
                        return
                    except:
                        sys.stdout = self.save_stdout
                        exc_info = sys.exc_info()
                        exc_msg = traceback.format_exception_only(
                            exc_info[0], exc_info[1])[-1]
                        self.save_stdout.write(self.runner.DIVIDER + '\n')
                        self.save_stdout.write('Unexpected exception:\n' +
                                               _indent(exc_msg))
                        raise
                        #self.post_mortem(debugger, exc_info[2])
                finally:
                    sys.stdout = self.save_stdout
        finally:
            pdb.set_trace = save_set_trace
            os.remove(srcfilename)

    def post_mortem(self, debugger, t):
        debugger.reset()
        while t.tb_next is not None:
            t = t.tb_next
        debugger.interaction(t.tb_frame, t)
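A rough sketch (assumed, based on _script_from_example and the _CHK_OUT template above) of the script that debug() builds for a docstring containing one example x = 1: non-example text becomes comments, and each example is followed by a check that drops into the debugger on mismatch:

script = '''\
# Some explanatory text.
x = 1
if not CHECK_OUTPUT(__examples__[0]): __set_trace__()
'''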
Example 27
 def __init__(self, optionflags=0):
     optionflags |= _unittest_reportflags
     BaseDocTestRunner.__init__(self,
                                verbose=False,
                                optionflags=optionflags)
Example 28
def run_test(doctest):
    summary = compat.NativeIO()
    runner = DocTestRunner(optionflags=REPORT_NDIFF)
    runner.run(doctest, out=summary.write)

    assert runner.failures == 0, '\n' + summary.getvalue()
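run_test expects an already-built DocTest; an assumed usage sketch:

from doctest import DocTestParser

test = DocTestParser().get_doctest('>>> 1 + 1\n2\n', {}, 'demo', 'demo', 0)
run_test(test)  # raises AssertionError with an ndiff report on failure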
Example 29
                return want, got

        def check_output(self, want, got, optionflags):
            want, got = self.remove_u(want, got)
            return super(Py23DocChecker, self).check_output(
                want, got, optionflags)

        def output_difference(self, example, got, optionflags):
            example.want, got = self.remove_u(example.want, got)
            return super(Py23DocChecker, self).output_difference(
                example, got, optionflags)

    global master
    m = sys.modules.get('__main__')
    finder = DocTestFinder()
    runner = DocTestRunner(checker=Py23DocChecker())
    for test in finder.find(m, m.__name__):
        runner.run(test)
    runner.summarize()
    import sys
    sys.exit()

    # yapf: disable
    class Cache(object):
        def __init__(self):
            self.cache = {}

        def set(self, k, v, ttl):
            self.cache[k] = v

        def get(self, k):
Example 30
 def __init__(self, verbose=False, mark_updates=False):
     '''Construct a new update runner'''
     self._mark_updates = mark_updates
     DocTestRunner.__init__(self, verbose=verbose)
Example 31
def run_test(doctest):
	summary = StringIO()
	runner = DocTestRunner(optionflags=REPORT_NDIFF)
	runner.run(doctest, out=summary.write)

	assert runner.failures == 0, '\n' + summary.getvalue()
Example 32
class TestingProtocol(models.Protocol):
    """A Protocol that executes doctests as lists of Example objects, supports 
    suite/case specificity, alternate file testing, and provides users with 
    details such as cases passed and test coverage.
    """
    def __init__(self, args, assignment):
        super().__init__(args, assignment)
        # The environment in which the doctests are run (global vars)
        self.good_env = {}
        self.verb = self.args.verbose
        # Initialize the doctest module objects that will do the testing/parsing
        self.parser = DocTestParser()
        self.runner = DocTestRunner(verbose=self.verb, optionflags=FAIL_FAST)
        self.lines_exec = 0
        self.lines_total = 0


    def test(self, good_env={}, suite=None, case=None):
        test_results = {}
        # all examples to be run will be put in exs
        exs = collections.OrderedDict()
        # use regex to get raw strings organized into suite/case
        self.get_data()
        try:
            if suite:
                exs = self.get_suite_examples(suite, case)
            elif case:
                # No support for cases without their suite
                raise EarlyExit('python3 ok: error: ' 
                    'Please specify suite for given case ({}).'.format(case[0]))
            else:
                exs = self.get_all_examples()
            # gets analytics to be returned
            test_results[self.tstfile_name] = self.analyze(suite, case, exs)
        except KeyError:
            raise EarlyExit('python3 ok: error: '
                    'Suite/Case label must be valid. '
                    '(Suites: {}, Cases: {})'.format(self.num_suites, self.num_cases))
        return test_results

    def analyze(self, suite, case, examples):
        failed, attempted = self.run_examples(examples)
        self.cov.stop()
        passed = attempted - failed
        format.print_test_progress_bar('{} summary'.format(self.tstfile_name),
                                       passed, failed, verbose=self.verb)
        # only support test coverage stats when running everything
        if not suite:
            self.print_coverage()
            if self.args.coverage:
                if self.lines_exec == self.lines_total:
                    print("Maximum coverage achieved! Great work!")
                else:
                    self.give_suggestions()
        return {'suites_total' : self.num_suites, 'cases_total': self.num_cases, 
                'exs_failed' : failed, 'exs_passed' : passed, 'attempted' : attempted,
                'actual_cov' : self.lines_exec, 'total_cov' : self.lines_total}

    def give_suggestions(self):
        print("Consider adding tests for the following:")
        for file in self.clean_src:
            file += '.py'
            cov_stats = self.cov.analysis2(file)
            missing_cov = cov_stats[3]
            if missing_cov:
                print('   File: {}'.format(file))
                missing_string = '      Line(s): ' + ','.join(map(str, missing_cov)) 
                print(missing_string)



    def get_suite_examples(self, suite, case):
        # suite/case specified, so only parse relevant text into Examples
        exs = collections.OrderedDict()
        case_ex = collections.OrderedDict()
        # get the shared lines that should impact all the cases in the suite.
        shrd_txt = self.shared_case_data[suite]
        if shrd_txt:
            parse_shared = self.parser.parse(shrd_txt.group(0), self.tstfile_name)
            shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
            if shrd_ex:
                case_ex['shared'] = shrd_ex
        if case:
            if str(case[0]) not in self.data[suite]:
                raise KeyError
            parsed_temp_examples = self.parser.parse(self.data[suite][case[0]], self.tstfile_name)
            case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
            case_ex[str(case[0])] = case_examples
        else:
            for itemcase in self.data[suite].keys():
                parsed_temp_examples = self.parser.parse(self.data[suite][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
        exs[suite] = case_ex
        return exs


    def get_all_examples(self):
        # no suite/case flag, so parses all text into Example objects
        exs = collections.OrderedDict()
        for sui in self.data.keys():
            case_ex = collections.OrderedDict()
            # get the shared lines that should impact all the cases in the suite.
            shrd_txt = self.shared_case_data[sui]
            if shrd_txt:
                parse_shared = self.parser.parse(shrd_txt.group(0), self.tstfile_name)
                shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
                if shrd_ex:
                    case_ex['shared'] = shrd_ex
            for itemcase in self.data[sui].keys():
                parsed_temp_examples = self.parser.parse(self.data[sui][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
            exs[sui] = case_ex
        return exs

    # catch infinite loops / recursion errors
    @conditionally(timeout(10), os.name != 'nt')
    def run_examples(self, exs):
        # runs the Example objects, keeps track of right/wrong etc
        total_failed = 0
        total_attempted = 0
        case = 'shared'
        for sui in exs.keys():
            if not total_failed:
                final_env = dict(self.good_env)
                if 'shared' in exs[sui].keys():
                    dtest = DocTest(exs[sui]['shared'], self.good_env, 'shared', None, None, None)
                    result = self.runner.run(dtest, clear_globs=False)
                    # take the env from shared dtest and save it for other exs
                    final_env = dict(self.good_env, **dtest.globs)
                    total_failed += result.failed
                    total_attempted += result.attempted
            for case in exs[sui].keys():
                if case != 'shared':
                    if not total_failed:
                        example_name = "Suite {}, Case {}".format(sui, case)
                        dtest = DocTest(exs[sui][case], final_env, example_name, None, None, None)
                        result = self.runner.run(dtest)
                        total_failed += result.failed
                        total_attempted += result.attempted
        return total_failed, total_attempted

    def get_data(self):
        # organizes data into suite/case strings to feed to the parser module
        self.tstfile_name, data_str = self.get_tstfile(self.testloc)
        self.data = collections.OrderedDict()
        self.shared_case_data = collections.OrderedDict()
        # chunk the file into suites
        data_suites = re.findall(r"(Suite\s*([\d\w]+))((?:(?!Suite)(.|\n))*)", data_str)
        self.num_suites = len(data_suites)
        self.num_cases = 0
        for curr_suite in data_suites:
            case_data = collections.OrderedDict()
            # chunk the suite into cases
            cases = re.findall(r"(Case\s*([\d\w]+))((?:(?!Case)(.|\n))*)", curr_suite[2])
            self.num_cases += len(cases)
            self.shared_case_data[str(curr_suite[1])] = re.match(r"((?:(?!Case)(.|\n))*)", curr_suite[2])
            for curr_case in cases:
                case_data[curr_case[1]] = curr_case[2]
            self.data[curr_suite[1]] = case_data

    def get_tstfile(self, location):
        # return file, file as a string
        PATH = os.path.join(location, self.args.testing)
        name = self.args.testing
        if not name.endswith('.rst'):
            raise EarlyExit('python3 ok: error: '
                        'Only .rst files are supported at this time.')
        try:
            with open(PATH, "r") as testfile:
                data_str = testfile.read()
        except FileNotFoundError as e:
            raise EarlyExit('python3 ok: error: '
                    '{} test file ({}) cannot be found.'.format(
                    'Default' if DEFAULT_TST_FILE == name else 'Specified', name))
        return name, data_str


    def print_coverage(self):
        # prints the coverage summary by diffing the two coverage trackers
        lines, executed = self.get_coverage(self.cov)
        self.lines_total = lines
        self.lines_exec = executed
        format.print_coverage_bar('Coverage summary',
                                  self.lines_exec, self.lines_total, verbose=self.verb)

    def get_coverage(self, cov):
        # returns executable lines, executed_lines
        lines_run = 0
        total_lines = 0 
        for file in self.clean_src:
            file_cov = cov.analysis2(file + '.py')
            lines = len(file_cov[1])
            lines_not_run = len(file_cov[3])
            total_lines += lines
            lines_run += lines - lines_not_run
        return total_lines, lines_run


    def run(self, messages, testloc=CURR_DIR):
        if self.args.score or self.args.unlock or not self.args.testing:
            return

        # Note: All (and only) .py files given in the src will be tracked and 
        # contribute to coverage statistics
        self.clean_src = [i[:-3] for i in self.assignment.src if i.endswith('.py')]
        self.cov = coverage(source=[testloc], include=[file + '.py' for file in self.clean_src])
        self.testloc = testloc
        self.cov.start()
        analytics = self.test(self.good_env, self.args.suite, self.args.case)
        messages['testing'] = analytics
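From the regexes in get_data, the .rst test file is assumed to look roughly like this: 'Suite N' headings, each holding optional shared setup examples followed by 'Case M' blocks:

sample = """Suite 1

>>> x = 10  # shared by every case in Suite 1

Case 1

>>> x + 1
11

Case 2

>>> x * 2
20
"""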