Example No. 1
import json
import pickle
import re
import sys
from doctest import DocTestParser, DocTestRunner
from os import path


def t():
    if sys.version_info[0] == 2:
        # This test is disabled on Python 2. There are too many subtle differences in the syntax ('str' has to be renamed
        # 'unicode', a 'u' prefix is needed in front of string literals, etc.); it's too hacky to preserve compatibility.
        #
        # In any case this test isn't there to verify that the library works on Python 2, it's to check that the README is
        # up to date with the code. So it doesn't matter.
        #
        #
        return

    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rb') as file_in:
        doctest_str = '\n\n'.join(
            re.findall(
                r'```python\s+(.+?)```',
                file_in.read().decode('UTF-8'),
                flags=re.S,
            )
        )
    assert doctest_str
    parser = DocTestParser()
    runner = DocTestRunner()
    runner.run(
        parser.get_doctest(
            doctest_str,
            dict(globals(), json=json, pickle=pickle),
            'README.md',
            'README.md',
            0,
        )
    )
    assert runner.failures == 0
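Distilled, the pattern above is: collect every ```python fenced block, join them into one doctest, and run it. A minimal self-contained sketch (the README path and globals are placeholders):

import re
from doctest import DocTestParser, DocTestRunner

def run_readme_doctests(readme_path, globs):
    # Join every ```python fenced block in the README into one doctest string.
    with open(readme_path, encoding='UTF-8') as file_in:
        source = '\n\n'.join(
            re.findall(r'```python\s+(.+?)```', file_in.read(), flags=re.S))
    runner = DocTestRunner()
    runner.run(DocTestParser().get_doctest(source, globs, readme_path, readme_path, 0))
    return runner.failures

# Hypothetical usage:
# assert run_readme_doctests('README.md', dict(globals())) == 0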
Example No. 2
def run_doctests(module, examples):
    from doctest import DocTest, DocTestRunner, ELLIPSIS

    dt = DocTest(examples, module.__dict__, module.__file__, None, None, None)
    dtr = DocTestRunner(optionflags=ELLIPSIS)

    dtr.run(dt, clear_globs=False)

    return dtr
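run_doctests expects a ready-made list of doctest.Example objects; one way to produce them is DocTestParser.get_examples(), which discards surrounding prose (a sketch, with a hypothetical module):

from doctest import DocTestParser

examples = DocTestParser().get_examples('''
Some prose that get_examples() will skip.

>>> 1 + 1
2
''')
# dtr = run_doctests(some_module, examples)  # some_module is hypothetical
# assert dtr.failures == 0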
Example No. 3
    def run_examples(self):
        from doctest import DocTest, DocTestRunner
        examples = sum([part.examples for part in self.parts
                        if isinstance(part, DocTestPart)], [])
        dt = DocTest(examples, self.module.__dict__, self.filename, None, None, None)
        dtr = DocTestRunner()

        def tmp_out(message_to_throw_away):
            # TODO capture error messages, warn
            return

        dtr.run(dt, out=tmp_out, clear_globs=False)
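The sum(..., []) idiom in run_examples flattens a list of example lists but is quadratic in the total length; itertools.chain does the same in linear time, should that ever matter:

from itertools import chain

lists = [[1, 2], [3], []]  # stand-in for [part.examples for part in ...]
assert sum(lists, []) == list(chain.from_iterable(lists)) == [1, 2, 3]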
Example No. 4
import re
from doctest import DocTestParser, DocTestRunner
from os import chdir, environ, path
from subprocess import check_output
from tempfile import TemporaryDirectory

import canif


def test_readme_examples():
    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rt', encoding='UTF-8') as file_in:
        all_blocks = re.findall(r'```(\w+)\s+(.+?)```',
                                file_in.read(),
                                flags=re.S)
    with TemporaryDirectory() as temp_dir:
        chdir(temp_dir)
        for syntax, block in all_blocks:
            if syntax == 'console':
                command_match = re.search(r'^\$ (\w+) (.+)\s+', block)
                if not command_match:
                    raise ValueError(block)
                print(command_match.group().rstrip())
                command, args = command_match.groups()
                block = block[command_match.end():]

                if command == 'cat':
                    # save the sample file to an actual file
                    file_name = args
                    with open(path.join(temp_dir, file_name),
                              'wt',
                              encoding='UTF-8') as file_out:
                        file_out.write(block)

                else:
                    # check that the command output is as expected
                    actual_output = check_output(
                        '%s %s' % (command, args),
                        shell=True,
                        cwd=temp_dir,
                        encoding='UTF-8',
                        env={
                            **environ,
                            # `canif --help` reads this, and it can vary in the CI environment, so pin it to a fixed value
                            'COLUMNS': '71',
                        },
                    )
                    print(actual_output)
                    assert actual_output == block

            elif syntax == 'python':
                parser = DocTestParser()
                test = parser.get_doctest(block, {'canif': canif}, 'README.md',
                                          'README.md', 0)
                runner = DocTestRunner()
                runner.run(test)
                assert not runner.failures
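The two-group regex above splits the README into (syntax, body) pairs; its behaviour is easiest to check on a toy document:

import re

sample = """```console
$ echo hi
hi
```

```python
>>> 1 + 1
2
```"""
blocks = re.findall(r'```(\w+)\s+(.+?)```', sample, flags=re.S)
assert [syntax for syntax, _ in blocks] == ['console', 'python']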
Example No. 5
    def test(self):
        # Make a new runner per function to be tested
        runner = DocTestRunner(verbose=d2u.verbose)
        for the_test in d2u.finder.find(func, func.__name__):
            runner.run(the_test)
        failed = count_failures(runner)
        if failed:
            # Since we only looked at a single function's docstring,
            # failed should contain at most one item.  More than that
            # is a case we can't handle and should error out on
            if len(failed) > 1:
                err = "Invalid number of test results: %s" % failed
                raise ValueError(err)
            # Report a normal failure.
            self.fail('failed doctests: %s' % str(failed[0]))
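count_failures is not part of doctest and is not shown here; a plausible minimal version, modeled on DocTestRunner.summarize() and relying on the private _name2ft attribute (present through Python 3.12), would be:

from doctest import TestResults

def count_failures(runner):
    # Collect the (failures, tries) records that contain at least one failure.
    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0]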
Example No. 6
    def run_examples(self):
        from doctest import DocTest, DocTestRunner
        examples = sum([
            part.examples
            for part in self.parts if isinstance(part, DocTestPart)
        ], [])
        dt = DocTest(examples, self.module.__dict__, self.filename, None, None,
                     None)
        dtr = DocTestRunner()

        def tmp_out(message_to_throw_away):
            # TODO capture error messages, warn
            return

        dtr.run(dt, out=tmp_out, clear_globs=False)
Example No. 7
    def test(self):
        # Make a new runner per function to be tested
        runner = DocTestRunner(verbose=d2u.verbose)
        for the_test in d2u.finder.find(func, func.__name__):
            runner.run(the_test)
        failed = count_failures(runner)
        if failed:
            # Since we only looked at a single function's docstring,
            # failed should contain at most one item.  More than that
            # is a case we can't handle and should error out on
            if len(failed) > 1:
                err = "Invalid number of test results: %s" % failed
                raise ValueError(err)
            # Report a normal failure.
            self.fail('failed doctests: %s' % str(failed[0]))
Example No. 8
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        save_stderr = sys.stderr
        sys.stderr = _SpoofOut()

        if self._verbosity > 0:
            print(self._stderr_term.CYAN + self._stderr_term.BOLD +
                  'Testing %s...' % test.name + self._stderr_term.NORMAL,
                  file=save_stderr)
        try:
            fails, tries = DocTestRunner.run(self, test, compileflags,
                                             out, clear_globs)
        except KeyboardInterrupt:
            if self._current_test is None:
                raise
            print(self._failure_header(*self._current_test), file=save_stderr)
            print(self._stderr_term.RED + self._stderr_term.BOLD +
                  'Keyboard Interrupt!' + self._stderr_term.NORMAL,
                  file=save_stderr)
            # Count the interrupted test as a failure so the summary below works.
            fails, tries = 1, 0
        if self._verbosity == 1:
            save_stderr.write(self._stderr_term.CLEAR_LINE)
        if self._verbosity > 0:
            if fails:
                print(self._stderr_term.RED + self._stderr_term.BOLD +
                      '  %d example(s) failed!' % fails + self._stderr_term.NORMAL,
                      file=save_stderr)
            else:
                print(self._stderr_term.GREEN + self._stderr_term.BOLD +
                      '  All examples passed' + self._stderr_term.NORMAL,
                      file=save_stderr)
        print(file=save_stderr)
        sys.stderr = save_stderr
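Stripped of the terminal colouring, the wrapper reduces to "silence stderr while the base class runs"; a minimal sketch, using io.StringIO in place of doctest's private _SpoofOut:

import sys
from doctest import DocTestRunner
from io import StringIO

class QuietRunner(DocTestRunner):
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        save_stderr = sys.stderr
        sys.stderr = StringIO()  # swallow anything the examples write to stderr
        try:
            return DocTestRunner.run(self, test, compileflags, out, clear_globs)
        finally:
            sys.stderr = save_stderr  # always restore, even on KeyboardInterrupt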
Example No. 9
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        save_stderr = sys.stderr
        sys.stderr = _SpoofOut()
        
        if self._verbosity > 0:
            print >>save_stderr, (
                self._stderr_term.CYAN+self._stderr_term.BOLD+
                'Testing %s...'%test.name+self._stderr_term.NORMAL)
        try:
            fails, tries = DocTestRunner.run(self, test, compileflags,
                                             out, clear_globs)
        except KeyboardInterrupt:
            if self._current_test is None: raise

            print >>save_stderr, self._failure_header(*self._current_test)
            print >>save_stderr, (
                self._stderr_term.RED+self._stderr_term.BOLD+
                'Keyboard Interrupt!'+self._stderr_term.NORMAL)
            # Count the interrupted test as a failure so the summary below works.
            fails, tries = 1, 0
        if self._verbosity == 1:
            save_stderr.write(self._stderr_term.CLEAR_LINE)
        if self._verbosity > 0:
            if fails:
                print >>save_stderr, (
                    self._stderr_term.RED+self._stderr_term.BOLD+
                    '  %d example(s) failed!'%fails+self._stderr_term.NORMAL)
            else:
                print >>save_stderr, (
                    self._stderr_term.GREEN+self._stderr_term.BOLD+
                    '  All examples passed'+self._stderr_term.NORMAL)
        print >>save_stderr
        sys.stderr = save_stderr
Example No. 10
def DTC_runTest(self):
    test = self._dt_test
    old = sys.stdout
    new = StringIO()
    optionflags = self._dt_optionflags
    if not (optionflags & REPORTING_FLAGS):
        # The option flags don't include any reporting flags,
        # so add the default reporting flags
        optionflags |= _unittest_reportflags
    # Patching doctestcase to enable verbose mode
    global g_doctest_verbose
    runner = DocTestRunner(optionflags=optionflags,
                           checker=self._dt_checker,
                           verbose=g_doctest_verbose)
    # End of patch
    try:
        runner.DIVIDER = "-" * 70
        failures, tries = runner.run(
            test, out=new.write, clear_globs=False)
    finally:
        sys.stdout = old
    if failures:
        raise self.failureException(self.format_failure(new.getvalue()))
    elif g_doctest_verbose:
        print(new.getvalue())
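The patch reads a module-level flag; wiring it up might look like this (g_doctest_verbose and the monkey-patch target are inferred from the snippet, not confirmed):

import doctest

g_doctest_verbose = False

def enable_verbose_doctests():
    # Flip the global read by the patched DTC_runTest above.
    global g_doctest_verbose
    g_doctest_verbose = True

# Hypothetical installation of the patch onto the stdlib test case class:
# doctest.DocTestCase.runTest = DTC_runTest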
Example No. 11
def DTC_runTest(self):
    test = self._dt_test
    old = sys.stdout
    new = StringIO()
    optionflags = self._dt_optionflags
    if not (optionflags & REPORTING_FLAGS):
        # The option flags don't include any reporting flags,
        # so add the default reporting flags
        optionflags |= _unittest_reportflags
    # Patching doctestcase to enable verbose mode
    global g_doctest_verbose
    runner = DocTestRunner(optionflags=optionflags,
                           checker=self._dt_checker,
                           verbose=g_doctest_verbose)
    # End of patch
    try:
        runner.DIVIDER = "-" * 70
        failures, tries = runner.run(test,
                                     out=new.write,
                                     clear_globs=False)
    finally:
        sys.stdout = old
    if failures:
        raise self.failureException(self.format_failure(new.getvalue()))
    elif g_doctest_verbose:
        print(new.getvalue())
Example No. 12
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        '''Run the update runner'''
        self._new_want = {}
        (f, t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)

        # Update the test's docstring, and the linenos of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0

        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] ==
                        old_lines[lineno][example.indent:]), \
                        'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' ' * example.indent + '... ' +
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([
                    ' ' * example.indent + l for l in new_want[:-1].split('\n')
                ])
                new_lines.append(new_want)
            # Update the example's want & lineno fields
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]

        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)

        # Return failures & tries
        return (f, t)
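run() consumes self._new_want and self._mark_updates, which the snippet does not define; a skeleton of the surrounding class, guessing that the corrected outputs are captured in report_failure:

from doctest import DocTestRunner

class UpdateRunner(DocTestRunner):
    def __init__(self, mark_updates=False, **kwargs):
        DocTestRunner.__init__(self, **kwargs)
        self._mark_updates = mark_updates
        self._new_want = {}

    def report_failure(self, out, test, example, got):
        # Remember what the example actually printed; run() splices it
        # back into the docstring afterwards.
        self._new_want[example] = got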
Example No. 13
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        '''Run the update runner'''
        self._new_want = {}
        (f,t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)

        # Update the test's docstring, and the linenos of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0

        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] == 
                        old_lines[lineno][example.indent:]), \
                        'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' '*example.indent + '... ' + 
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([' '*example.indent+l
                                      for l in new_want[:-1].split('\n')])
                new_lines.append(new_want)
            # Update the example's want & lineno fields
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]

        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)

        # Return failures & tries
        return (f,t)
Example No. 14
def _import_docstring(documenter):
    code_content = _import_docstring_code_content(documenter)
    if code_content:
        # noinspection PyBroadException
        try:
            code, content = code_content
            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)

            glob = {}
            if documenter.modname:
                exec('from %s import *\n' % documenter.modname, glob)

            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)

            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            pass
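The exec('from %s import *') line seeds the doctest globals with the module's public names; the same effect without exec, as an alternative sketch:

import importlib

def star_import_globals(modname):
    # Equivalent of `from modname import *` into a fresh dict.
    mdl = importlib.import_module(modname)
    names = getattr(mdl, '__all__',
                    [n for n in vars(mdl) if not n.startswith('_')])
    return {name: getattr(mdl, name) for name in names}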
Example No. 15
def _import_docstring(documenter):
    if getattr(documenter.directive, 'content', None):
        # noinspection PyBroadException
        try:
            import textwrap

            content = documenter.directive.content

            def get_code(source, c=''):
                s = "\n%s" % c
                return textwrap.dedent(s.join(map(str, source)))

            is_doctest = contains_doctest(get_code(content))
            offset = documenter.directive.content_offset
            if is_doctest:
                parent, parent_offset = get_grandfather_content(content)
                parent = parent[:offset + len(content) - parent_offset]
                code = get_code(parent)
            else:
                code = get_code(content, '>>> ')

            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)

            glob = {}
            exec('import %s as mdl\n' % documenter.modname, glob)
            glob = glob['mdl'].__dict__
            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)

            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            return False
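contains_doctest is referenced but not shown; a plausible minimal version just looks for interpreter prompts (an assumption, not the original helper):

import re

def contains_doctest(text):
    # True if the text already holds `>>> `-style interactive examples.
    return re.search(r'^\s*>>> ', text, re.MULTILINE) is not None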
Example No. 16
from doctest import REPORT_NDIFF, DocTestRunner
from io import StringIO


def run_test(doctest):
    summary = StringIO()
    runner = DocTestRunner(optionflags=REPORT_NDIFF)
    runner.run(doctest, out=summary.write)

    assert runner.failures == 0, '\n' + summary.getvalue()
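A usage sketch for run_test, building the doctest from a docstring (the sample function is hypothetical):

from doctest import DocTestParser

def sample():
    """
    >>> 2 + 2
    4
    """

run_test(DocTestParser().get_doctest(sample.__doc__, {}, 'sample', None, 0))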
Example No. 17
        def check_output(self, want, got, optionflags):
            want, got = self.remove_u(want, got)
            return super(Py23DocChecker, self).check_output(
                want, got, optionflags)

        def output_difference(self, example, got, optionflags):
            example.want, got = self.remove_u(example.want, got)
            return super(Py23DocChecker, self).output_difference(
                example, got, optionflags)

    global master
    m = sys.modules.get('__main__')
    finder = DocTestFinder()
    runner = DocTestRunner(checker=Py23DocChecker())
    for test in finder.find(m, m.__name__):
        runner.run(test)
    runner.summarize()
    sys.exit()

    # yapf: disable
    class Cache(object):
        def __init__(self):
            self.cache = {}

        def set(self, k, v, ttl):
            self.cache[k] = v

        def get(self, k):
            return self.cache.get(k, None)
    cache_client = Cache()
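remove_u is defined elsewhere in the source; a typical implementation strips Python 2 unicode-literal prefixes so the two interpreters' reprs compare equal. A sketch of the method that would slot into Py23DocChecker:

import re

_U_PREFIX = re.compile(r'\bu([\'"])')

def remove_u(self, want, got):
    # Turn u'x' / u"x" into 'x' / "x" in both strings before comparing.
    return _U_PREFIX.sub(r'\1', want), _U_PREFIX.sub(r'\1', got)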
Example No. 18
class TestingProtocol(models.Protocol):
    """A Protocol that executes doctests as lists of Example objects, supports 
    suite/case specificity, alternate file testing, and provides users with 
    details such as cases passed and test coverage.
    """
    def __init__(self, args, assignment):
        super().__init__(args, assignment)
        # The environment in which the doctests are run (global vars)
        self.good_env = {}
        self.verb = self.args.verbose
        # Initialize the doctest module objects that will do the testing/parse
        self.parser = DocTestParser()
        self.runner = DocTestRunner(verbose=self.verb, optionflags=FAIL_FAST)
        self.lines_exec = 0
        self.lines_total = 0


    def test(self, good_env={}, suite=None, case=None):
        test_results = {}
        # all examples to be run will be put in exs
        exs = collections.OrderedDict()
        # use regex to get raw strings organized into suite/case
        self.get_data()
        try:
            if suite:
                exs = self.get_suite_examples(suite, case)
            elif case:
                # No support for cases without their suite
                raise EarlyExit('python3 ok: error: ' 
                    'Please specify suite for given case ({}).'.format(case[0]))
            else:
                exs = self.get_all_examples()
            # gets analytics to be returned
            test_results[self.tstfile_name] = self.analyze(suite, case, exs)
        except KeyError:
            raise EarlyExit('python3 ok: error: '
                    'Suite/Case label must be valid. '
                    '(Suites: {}, Cases: {})'.format(self.num_suites, self.num_cases))
        return test_results

    def analyze(self, suite, case, examples):
        failed, attempted = self.run_examples(examples)
        self.cov.stop()
        passed = attempted - failed
        format.print_test_progress_bar('{} summary'.format(self.tstfile_name),
                                       passed, failed, verbose=self.verb)
        # only support test coverage stats when running everything
        if not suite:
            self.print_coverage()
            if self.args.coverage:
                if self.lines_exec == self.lines_total:
                    print("Maximum coverage achieved! Great work!")
                else:
                    self.give_suggestions()
        return {'suites_total': self.num_suites, 'cases_total': self.num_cases,
                'exs_failed': failed, 'exs_passed': passed, 'attempted': attempted,
                'actual_cov': self.lines_exec, 'total_cov': self.lines_total}

    def give_suggestions(self):
        print("Consider adding tests for the following:")
        for file in self.clean_src:
            file += '.py'
            cov_stats = self.cov.analysis2(file)
            missing_cov = cov_stats[3]
            if missing_cov:
                print('   File: {}'.format(file))
                missing_string = '      Line(s): ' + ','.join(map(str, missing_cov)) 
                print(missing_string)



    def get_suite_examples(self, suite, case):
        # suite/case specified, so only parse relevant text into Examples
        exs = collections.OrderedDict()
        case_ex = collections.OrderedDict()
        # get the shared lines that should impact all the cases in the suite.
        shrd_txt = self.shared_case_data[suite]
        if shrd_txt:
            parse_shared = self.parser.parse(shrd_txt.group(0), self.tstfile_name)
            shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
            if shrd_ex:
                case_ex['shared'] = shrd_ex
        if case:
            if str(case[0]) not in self.data[suite]:
                raise KeyError
            parsed_temp_examples = self.parser.parse(self.data[suite][case[0]], self.tstfile_name)
            case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
            case_ex[str(case[0])] = case_examples
        else:
            for itemcase in self.data[suite].keys():
                parsed_temp_examples = self.parser.parse(self.data[suite][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
        exs[suite] = case_ex
        return exs


    def get_all_examples(self):
        # no suite/case flag, so parses all text into Example objects
        exs = collections.OrderedDict()
        for sui in self.data.keys():
            case_ex = collections.OrderedDict()
            # get the shared lines that should impact all the cases in the suite.
            shrd_txt = self.shared_case_data[sui]
            if shrd_txt:
                parse_shared = self.parser.parse(shrd_txt.group(0), self.tstfile_name)
                shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
                if shrd_ex:
                    case_ex['shared'] = shrd_ex
            for itemcase in self.data[sui].keys():
                parsed_temp_examples = self.parser.parse(self.data[sui][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
            exs[sui] = case_ex
        return exs

    # catch infinite loops / recursion errors; the timeout is only applied off Windows
    @conditionally(timeout(10), os.name != 'nt')
    def run_examples(self, exs):
        # runs the Example objects, keeps track of right/wrong etc
        total_failed = 0
        total_attempted = 0
        case = 'shared'
        for sui in exs.keys():
            if not total_failed:
                final_env = dict(self.good_env)
                if 'shared' in exs[sui].keys():
                    dtest = DocTest(exs[sui]['shared'], self.good_env, 'shared', None, None, None)
                    result = self.runner.run(dtest, clear_globs=False)
                    # take the env from shared dtest and save it for other exs
                    final_env = dict(self.good_env, **dtest.globs)
                    total_failed += result.failed
                    total_attempted += result.attempted
            for case in exs[sui].keys():
                if case != 'shared':
                    if not total_failed:
                        example_name = "Suite {}, Case {}".format(sui, case)
                        dtest = DocTest(exs[sui][case], final_env, example_name, None, None, None)
                        result = self.runner.run(dtest)
                        total_failed += result.failed
                        total_attempted += result.attempted
        return total_failed, total_attempted

    def get_data(self):
        # organizes data into suite/case strings to feed to the parser module
        self.tstfile_name, data_str = self.get_tstfile(self.testloc)
        self.data = collections.OrderedDict()
        self.shared_case_data = collections.OrderedDict()
        # chunk the file into suites
        data_suites = re.findall(r"(Suite\s*([\d\w]+))((?:(?!Suite)(.|\n))*)", data_str)
        self.num_suites = len(data_suites)
        self.num_cases = 0
        for curr_suite in data_suites:
            case_data = collections.OrderedDict()
            # chunk the suite into cases
            cases = re.findall(r"(Case\s*([\d\w]+))((?:(?!Case)(.|\n))*)", curr_suite[2])
            self.num_cases += len(cases)
            self.shared_case_data[str(curr_suite[1])] = re.match(r"((?:(?!Case)(.|\n))*)", curr_suite[2])
            for curr_case in cases:
                case_data[curr_case[1]] = curr_case[2]
            self.data[curr_suite[1]] = case_data

    def get_tstfile(self, location):
        # return file, file as a string
        PATH = os.path.join(location, self.args.testing)
        name = self.args.testing
        if not name.endswith('.rst'):
            raise EarlyExit('python3 ok: error: '
                        'Only .rst files are supported at this time.')
        try:
            with open(PATH, "r") as testfile:
                data_str = testfile.read()
        except FileNotFoundError:
            raise EarlyExit('python3 ok: error: '
                    '{} test file ({}) cannot be found.'.format(
                    'Default' if DEFAULT_TST_FILE == name else 'Specified', name))
        return name, data_str


    def print_coverage(self):
        # prints the coverage summary by diffing the two coverage trackers
        lines, executed = self.get_coverage(self.cov)
        self.lines_total = lines
        self.lines_exec = executed
        format.print_coverage_bar('Coverage summary',
            self.lines_exec, self.lines_total, verbose=self.verb)

    def get_coverage(self, cov):
        # returns executable lines, executed_lines
        lines_run = 0
        total_lines = 0 
        for file in self.clean_src:
            file_cov = cov.analysis2(file + '.py')
            lines = len(file_cov[1])
            lines_not_run = len(file_cov[3])
            total_lines += lines
            lines_run += lines - lines_not_run
        return total_lines, lines_run


    def run(self, messages, testloc=CURR_DIR):
        if self.args.score or self.args.unlock or not self.args.testing:
            return

        # Note: All (and only) .py files given in the src will be tracked and 
        # contribute to coverage statistics
        self.clean_src = [i[:-3] for i in self.assignment.src if i.endswith('.py')]
        self.cov = coverage(source=[testloc], include=[file + '.py' for file in self.clean_src])
        self.testloc = testloc
        self.cov.start()
        analytics = self.test(self.good_env, self.args.suite, self.args.case)
        messages['testing'] = analytics
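The nested Suite/Case chunking in get_data is easiest to follow on a toy .rst string (layout inferred from the regexes above):

import re

data_str = """Suite 1
Case A
>>> 1 + 1
2
Case B
>>> 2 * 2
4
"""
suites = re.findall(r"(Suite\s*([\d\w]+))((?:(?!Suite)(.|\n))*)", data_str)
assert suites[0][1] == '1'
cases = re.findall(r"(Case\s*([\d\w]+))((?:(?!Case)(.|\n))*)", suites[0][2])
assert [c[1] for c in cases] == ['A', 'B']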
Example No. 19
def run_test(doctest):
    summary = compat.NativeIO()
    runner = DocTestRunner(optionflags=REPORT_NDIFF)
    runner.run(doctest, out=summary.write)

    assert runner.failures == 0, '\n' + summary.getvalue()