Example #1
def testfile(filename, fileErrors):
    """Funció testfile() que rep per paràmetres el filename que seria el fitxer
    doctest que volem mostrar per pantalla, i el fitxer fileErrors que serà
    els links dels errors que volem mostrar. La funció crearà una instància
    de myDocTestRunner, i instanciarà un paràmetre strings que serà una llista
    d'strings els cuals l[n] i l[n+1] seran els strings en el cual el n text estarà
    entremig"""
    with open(filename, "r") as f:
        fitxer = f.read()
    l = DocTestParser().parse(fitxer)

    l1 = []
    for i in l:
        if isinstance(i, str):
            l1.append(i)
    l2 = DocTestParser().get_examples(fitxer)

    mapp = dict()
    doc = doctest.DocTest(l2, mapp, "", None, None, None)

    runner = MyDocTestRunner()
    runner.strings = l1
    runner.run(doc)

    print("\n.. Enllacos als errors")
    with open(fileErrors, "r") as f:
        print(f.read())
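
The snippet assumes a custom MyDocTestRunner defined elsewhere. A minimal sketch of what such a subclass could look like, built only on the standard doctest.DocTestRunner.report_failure hook (the strings attribute and its use here are assumptions, not the original implementation):

import doctest

class MyDocTestRunner(doctest.DocTestRunner):
    # Hypothetical sketch: `strings` is assigned by the caller (see
    # testfile above) and holds the text chunks between examples.
    strings = None

    def report_failure(self, out, test, example, got):
        # A real implementation would interleave self.strings around the
        # failing example; here we fall back to the default report.
        doctest.DocTestRunner.report_failure(self, out, test, example, got)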
Example #2
def run_doctest(name, doctest_string, global_environment):
    """
    Run a single test with given global_environment.
    Returns (True, '') if the doctest passes.
    Returns (False, failure_message) if the doctest fails.
    """
    examples = doctest.DocTestParser().parse(doctest_string, name)
    test = doctest.DocTest(
        [e for e in examples if isinstance(e, doctest.Example)],
        global_environment,
        name,
        None,
        None,
        doctest_string,
    )

    doctestrunner = doctest.DocTestRunner(verbose=True)

    runresults = io.StringIO()
    with redirect_stdout(runresults), redirect_stderr(
            runresults), hide_outputs():
        doctestrunner.run(test, clear_globs=False)
    with open("/dev/null", "w") as f, redirect_stderr(f), redirect_stdout(f):
        result = doctestrunner.summarize(verbose=True)
    # An individual test can only pass or fail
    if result.failed == 0:
        return True, ""
    return False, runresults.getvalue()
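
A quick usage sketch (the doctest string and environment below are invented for illustration):

passed, message = run_doctest(
    "q1",                         # test name
    ">>> add(2, 2)\n4\n",         # doctest in string form
    {"add": lambda a, b: a + b},  # globals the doctest runs against
)
if not passed:
    print(message)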
Example #3
def run_setup_cleanup(runner, testcodes, what):
    # type: (Any, List[TestCode], Any) -> bool
    examples = []
    for testcode in testcodes:
        example = doctest.Example(testcode.code,
                                  "",
                                  lineno=testcode.lineno)
        examples.append(example)
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(
        examples,
        {},
        "%s (%s code)" % (group.name, what),
        testcodes[0].filename,
        0,
        None,
    )
    sim_doctest.globs = ns
    old_f = runner.failures
    self.type = "exec"  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
    if runner.failures > old_f:
        return False
    return True
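
The "simulated doctest" trick above also works standalone: wrap code in an output-less doctest.Example so the runner executes it, and only an exception or unexpected output counts as a failure. Note that self.type = "exec" matters because Sphinx's runner compiles the snippet in exec mode; the stock DocTestRunner compiles each example in single-statement mode, so a plain sketch must keep the source to one statement:

import doctest

example = doctest.Example("import math\n", "", lineno=0)  # no expected output
sim = doctest.DocTest([example], {}, "demo (setup code)", "<sim>", 0, None)
runner = doctest.DocTestRunner()
runner.run(sim, clear_globs=False)
print(runner.failures)  # 0: the code ran, printed nothing, raised nothing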
Example #4
def run_tests(ipynb_path, globs):
    base_path = os.path.dirname(ipynb_path)
    test_files = glob(os.path.join(base_path, 'tests/q*.py'))
    tests = []
    doctestparser = doctest.DocTestParser()
    results = []
    for test_file in test_files:
        test_file_globals = {}
        with open(test_file) as f:
            doctestrunner = doctest.DocTestRunner()

            exec(f.read(), test_file_globals)
            defined_test = test_file_globals['test']
            assert len(defined_test['suites']) == 1
            assert defined_test['points'] == 1

            for case in defined_test['suites'][0]['cases']:
                examples = doctestparser.parse(
                    case['code'],
                    defined_test['name'],
                )
                test = doctest.DocTest(
                    [e for e in examples if type(e) is doctest.Example], globs,
                    defined_test['name'], None, None, None)
                with open(os.devnull, 'w') as devnull, \
                        redirect_stdout(devnull), redirect_stderr(devnull):
                    doctestrunner.run(test, clear_globs=False)
            with open(os.devnull, 'w') as devnull, \
                    redirect_stdout(devnull), redirect_stderr(devnull):
                result = doctestrunner.summarize()
            results.append(1 if result.failed == 0 else 0)
    return sum(results) / len(results) if results else 0.0  # avoid dividing by zero when no test files exist
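
The loader above expects each tests/q*.py file to define a test dict in an OK-grader-style layout matching its asserts. A hypothetical file shaped to satisfy them:

# tests/q1.py (hypothetical layout inferred from the asserts above)
test = {
    'name': 'q1',
    'points': 1,
    'suites': [{
        'cases': [
            {'code': ">>> 1 + 1\n2\n"},  # each case carries a doctest string
        ],
    }],
}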
Example #5
def run_doctest(name, doctest_string, global_environment):
    """
    Run a single test with given ``global_environment``. Returns ``(True, '')`` if the doctest passes. 
    Returns ``(False, failure_message)`` if the doctest fails.

    Args:
        name (``str``): name of doctest
        doctest_string (``str``): doctest in string form
        global_environment (``dict``): global environment resulting from the execution of a python 
            script/notebook
    
    Returns:
        ``tuple`` of (``bool``, ``str``): results from running the test
    """
    examples = doctest.DocTestParser().parse(doctest_string, name)
    test = doctest.DocTest(
        [e for e in examples if isinstance(e, doctest.Example)],
        global_environment, name, None, None, doctest_string)

    doctestrunner = doctest.DocTestRunner(verbose=True)

    runresults = io.StringIO()
    with redirect_stdout(runresults), redirect_stderr(
            runresults), hide_outputs():
        doctestrunner.run(test, clear_globs=False)
    with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
        result = doctestrunner.summarize(verbose=True)
    # An individual test can only pass or fail
    if result.failed == 0:
        return True, ''
    return False, runresults.getvalue()
Example #6
def run_setup_cleanup(runner, testcodes, what):
    # type: (Any, List[TestCode], Any) -> bool
    examples = []
    for testcode in testcodes:
        examples.append(
            doctest.Example(  # type: ignore
                doctest_encode(testcode.code,
                               self.env.config.source_encoding),
                '',  # type: ignore  # NOQA
                lineno=testcode.lineno))
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(
        examples,
        {},  # type: ignore
        '%s (%s code)' % (group.name, what),
        filename_str,
        0,
        None)
    sim_doctest.globs = ns
    old_f = runner.failures
    self.type = 'exec'  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
    if runner.failures > old_f:
        return False
    return True
Example #7
def collect():
    self.ns.update(mod.__dict__)
    for example in examples:
        atest = dt.DocTest([example], self.ns, mod.__name__,
                           mod.__file__, maybe_lineof(func),
                           str(func.__doc__))
        yield scenario(atest)
Example #8
    def __call__(self, global_environment):
        """
        Run test with given global_environment.
        """
        test = doctest.DocTest(
            [e for e in self.examples if isinstance(e, doctest.Example)],
            global_environment,
            self.name,
            None,
            None,
            self.doctest_string
        )

        doctestrunner = doctest.DocTestRunner(verbose=True)

        runresults = io.StringIO()
        with redirect_stdout(runresults), redirect_stderr(runresults):
            doctestrunner.run(test, clear_globs=False)
        with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
            result = doctestrunner.summarize(verbose=True)
        # An individual test can only pass or fail
        grade = 1.0 if result.failed == 0 else 0.0
        if grade == 1.0:
            summary = 'Test {} passed!'.format(self.name)
        else:
            summary = self.PLAIN_TEXT_SUMMARY_TEMPLATE.format(
                name=self.name,
                doctest_string=dedent(self.doctest_string),
                runresults=runresults.getvalue()
            )
        return TestResult(grade, summary)
Example #9
    def __call__(self, global_environment):
        """
        Run test with given global_environment.
        """
        test = doctest.DocTest(
            [e for e in self.examples if type(e) is doctest.Example],
            global_environment,
            self.name,
            None,
            None,
            self.doctest_string
        )

        doctestrunner = doctest.DocTestRunner(verbose=True)

        runresults = io.StringIO()
        with redirect_stdout(runresults), redirect_stderr(runresults):
            doctestrunner.run(test, clear_globs=False)
        with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
            result = doctestrunner.summarize(verbose=True)
        # guard: result.attempted can be 0 if no examples were parsed
        score = 1.0 - (result.failed / result.attempted) if result.attempted else 0.0
        if score == 1.0:
            summary = 'Test {} passed!'.format(self.name)
        else:
            summary = self.PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE.format(
                name=self.name,
                doctest_string=dedent(self.doctest_string),
                runresults=runresults.getvalue()
            )
        return TestResult(score, summary)
Example #10
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return doctest.DocTest(self.get_examples(string, name), globs, name,
                               filename, lineno, string)
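
For reference, the positional arguments used throughout these examples are DocTest(examples, globs, name, filename, lineno, docstring). The stdlib doctest.DocTestParser already provides an equivalent get_doctest; a short end-to-end run:

import doctest

parser = doctest.DocTestParser()
test = parser.get_doctest(">>> 2 * 3\n6\n", {}, "demo", "<demo>", 0)
results = doctest.DocTestRunner().run(test)
print(results)  # TestResults(failed=0, attempted=1)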
Example #11
def run_setup_cleanup(runner, testcodes, what):
    examples = []
    for testcode in testcodes:
        examples.append(
            doctest.Example(testcode.code, '', lineno=testcode.lineno))
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(examples, {},
                                  '%s (%s code)' % (group.name, what),
                                  filename, 0, None)
    old_f = runner.failures
    self.type = 'exec'  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out)
    if runner.failures > old_f:
        return False
    return True
Example #12
def make_suite():  # pragma: no cover
    from calmjs.parse.lexers import es5 as es5lexer
    from calmjs.parse import walkers
    from calmjs.parse import sourcemap

    def open(p, flag='r'):
        result = StringIO(examples[p] if flag == 'r' else '')
        result.name = p
        return result

    parser = doctest.DocTestParser()
    optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    pkgdesc = ''
    dist = get_distribution('calmjs.parse')
    if dist:
        if dist.has_metadata('PKG-INFO'):
            pkgdesc = dist.get_metadata('PKG-INFO').replace('\r', '')
        elif dist.has_metadata('METADATA'):
            pkgdesc = dist.get_metadata('METADATA').replace('\r', '')
    pkgdesc_tests = [
        t for t in parser.parse(pkgdesc) if isinstance(t, doctest.Example)
    ]

    test_loader = unittest.TestLoader()
    test_suite = test_loader.discover('calmjs.parse.tests',
                                      pattern='test_*.py',
                                      top_level_dir=dirname(__file__))
    test_suite.addTest(doctest.DocTestSuite(es5lexer, optionflags=optflags))
    test_suite.addTest(doctest.DocTestSuite(walkers, optionflags=optflags))
    test_suite.addTest(doctest.DocTestSuite(sourcemap, optionflags=optflags))
    test_suite.addTest(
        doctest.DocTestCase(
            # skipping all the error case tests which should all be in the
            # troubleshooting section at the end; bump the index whenever
            # more failure examples are added.
            # also note that line number is unknown, as PKG_INFO has headers
            # and also the counter is somehow inaccurate in this case.
            doctest.DocTest(pkgdesc_tests[:-1], {'open': open}, 'PKG_INFO',
                            'README.rst', None, pkgdesc),
            optionflags=optflags,
        ))

    return test_suite
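
Running the assembled suite is plain unittest; nothing below is specific to calmjs.parse:

import unittest

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(make_suite())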
Example #13
    def get_doctest(self, string, globs, name, filename, lineno):
        try:
            self.javascript_remote_session.connect()
            self.skip_javascript_tests = False
        except JavascriptSessionError as e:
            self.skip_javascript_tests = True
            ex = e

        globs = globs.copy()
        globs["_js_test"] = self.javascript_remote_session.test

        _doctest = doctest.DocTest(self.get_examples(string, name), globs,
                                   name, filename, lineno, string)

        if self.skip_javascript_tests and self.has_javascript_tests:
            print(
                "[Warning] The javascript tests will BE SKIPPED! because the connection failed:\n %s"
                % str(ex))

        return _doctest
Example #14
    def test_group(self, group, filename):

        j = Popen(["../julia"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        j.stdin.write("macro raw_str(s) s end;nothing\n".encode('utf-8'))
        j.stdin.write("_ans = nothing\n".encode('utf-8'))
        j.stdin.write("""
            if VERSION >= v"0.5.0-dev+1911"
                pushdisplay(TextDisplay(
                    IOContext(IOContext(STDOUT, :multiline => true), :limit => true)
                ));
                nothing
            end
            """.encode('utf-8'))
        self.setup_runner.julia = j
        self.test_runner.julia = j
        self.cleanup_runner.julia = j

        def run_setup_cleanup(runner, testcodes, what):
            examples = []
            for testcode in testcodes:
                examples.append(
                    doctest.Example(testcode.code, '', lineno=testcode.lineno))
            if not examples:
                return True
            # simulate a doctest with the code
            sim_doctest = doctest.DocTest(examples, {},
                                          '%s (%s code)' % (group.name, what),
                                          filename, 0, None)
            old_f = runner.failures
            self.type = 'exec'  # the snippet may contain multiple statements
            runner.run(sim_doctest, out=self._warn_out)
            if runner.failures > old_f:
                return False
            return True

        # run the setup code
        if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
            # if setup failed, don't run the group
            return

        # run the tests
        for code in group.tests:
            if len(code) == 1:
                # ordinary doctests (code/output interleaved)
                try:
                    test = parser.get_doctest(code[0].code, {}, group.name,
                                              filename, code[0].lineno)
                except Exception:
                    self.warn(
                        'ignoring invalid doctest code: %r' % code[0].code,
                        '%s:%s' % (filename, code[0].lineno))
                    continue
                if not test.examples:
                    continue
                for example in test.examples:
                    # apply directive's comparison options
                    new_opt = code[0].options.copy()
                    new_opt.update(example.options)
                    example.options = new_opt
                self.type = 'single'  # as for ordinary doctests
            else:
                # testcode and output separate
                output = code[1] and code[1].code or ''
                options = code[1] and code[1].options or {}
                # disable <BLANKLINE> processing as it is not needed
                options[doctest.DONT_ACCEPT_BLANKLINE] = True
                # find out if we're testing an exception
                m = parser._EXCEPTION_RE.match(output)
                if m:
                    exc_msg = m.group('msg')
                else:
                    exc_msg = None
                example = doctest.Example(code[0].code,
                                          output,
                                          exc_msg=exc_msg,
                                          lineno=code[0].lineno,
                                          options=options)
                test = doctest.DocTest([example], {}, group.name, filename,
                                       code[0].lineno, None)
                self.type = 'exec'  # multiple statements again
            self.test_runner.run(test, out=self._warn_out)

        # run the cleanup
        run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup')

        j.kill()
Example #15
def parse_rst_ipython_tests(rst, name, extraglobs=None, optionflags=None):
    """Extracts examples from an rst file and produce a test suite by running
  them through pandas to get the expected outputs.
  """

    # Optional dependency.
    import IPython
    from traitlets.config import Config

    def get_indent(line):
        return len(line) - len(line.lstrip())

    def is_example_line(line):
        line = line.strip()
        return line and not line.startswith(
            '#') and not line[0] == line[-1] == ':'

    IMPORT_PANDAS = 'import pandas as pd'

    example_srcs = []
    lines = iter([(lineno, line.rstrip())
                  for lineno, line in enumerate(rst.split('\n'))
                  if is_example_line(line)] + [(None, 'END')])

    # https://ipython.readthedocs.io/en/stable/sphinxext.html
    lineno, line = next(lines)
    while True:
        if line == 'END':
            break
        if line.startswith('.. ipython::'):
            lineno, line = next(lines)
            indent = get_indent(line)
            example = []
            example_srcs.append((lineno, example))
            while get_indent(line) >= indent:
                if '@verbatim' in line or ':verbatim:' in line or '@savefig' in line:
                    example_srcs.pop()
                    break
                line = re.sub(r'In \[\d+\]: ', '', line)
                line = re.sub(r'\.\.\.+:', '', line)
                example.append(line[indent:])
                lineno, line = next(lines)
                if get_indent(line) == indent and line[indent] not in ')]}':
                    example = []
                    example_srcs.append((lineno, example))
        else:
            lineno, line = next(lines)

    # TODO(robertwb): Would it be better to try and detect/compare the actual
    # objects in two parallel sessions than make (stringified) doctests?
    examples = []

    config = Config()
    config.HistoryManager.hist_file = ':memory:'
    config.InteractiveShell.autocall = False
    config.InteractiveShell.autoindent = False
    config.InteractiveShell.colors = 'NoColor'

    set_pandas_options()
    IP = IPython.InteractiveShell.instance(config=config)
    IP.run_cell(IMPORT_PANDAS + '\n')
    IP.run_cell('import numpy as np\n')
    try:
        stdout = sys.stdout
        for lineno, src in example_srcs:
            sys.stdout = cout = StringIO()
            src = '\n'.join(src)
            if src == IMPORT_PANDAS:
                continue
            IP.run_cell(src + '\n')
            output = cout.getvalue()
            if output:
                # Strip the prompt.
                # TODO(robertwb): Figure out how to suppress this.
                output = re.sub(r'^Out\[\d+\]:\s*', '', output)
            examples.append(doctest.Example(src, output, lineno=lineno))

    finally:
        sys.stdout = stdout

    return doctest.DocTest(examples, dict(extraglobs or {}, np=np), name, name,
                           None, None)
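
The returned DocTest can then be fed to any runner. A hedged sketch, where rst_source stands in for an rst document containing .. ipython:: blocks (an assumed input, not part of the function above):

import doctest

test = parse_rst_ipython_tests(rst_source, "pandas_examples")  # rst_source: assumed input
runner = doctest.DocTestRunner(optionflags=doctest.NORMALIZE_WHITESPACE)
results = runner.run(test)
print(results)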
Example #16
    def test_group(self, group):
        # type: (TestGroup) -> None
        ns = {}  # type: Dict

        def run_setup_cleanup(runner, testcodes, what):
            # type: (Any, List[TestCode], Any) -> bool
            examples = []
            for testcode in testcodes:
                examples.append(
                    doctest.Example(  # type: ignore
                        doctest_encode(testcode.code,
                                       self.env.config.source_encoding),
                        '',  # type: ignore  # NOQA
                        lineno=testcode.lineno))
            if not examples:
                return True
            # simulate a doctest with the code
            sim_doctest = doctest.DocTest(examples, {},
                                          '%s (%s code)' % (group.name, what),
                                          testcodes[0].filename, 0, None)
            sim_doctest.globs = ns
            old_f = runner.failures
            self.type = 'exec'  # the snippet may contain multiple statements
            runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
            if runner.failures > old_f:
                return False
            return True

        # run the setup code
        if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
            # if setup failed, don't run the group
            return

        # run the tests
        for code in group.tests:
            if len(code) == 1:
                # ordinary doctests (code/output interleaved)
                try:
                    test = parser.get_doctest(  # type: ignore
                        doctest_encode(code[0].code,
                                       self.env.config.source_encoding),
                        {},  # type: ignore  # NOQA
                        group.name,
                        code[0].filename,
                        code[0].lineno)
                except Exception:
                    logger.warning(__('ignoring invalid doctest code: %r'),
                                   code[0].code,
                                   location=(code[0].filename, code[0].lineno))
                    continue
                if not test.examples:
                    continue
                for example in test.examples:
                    # apply directive's comparison options
                    new_opt = code[0].options.copy()
                    new_opt.update(example.options)
                    example.options = new_opt
                self.type = 'single'  # as for ordinary doctests
            else:
                # testcode and output separate
                output = code[1] and code[1].code or ''
                options = code[1] and code[1].options or {}
                # disable <BLANKLINE> processing as it is not needed
                options[doctest.DONT_ACCEPT_BLANKLINE] = True
                # find out if we're testing an exception
                m = parser._EXCEPTION_RE.match(output)  # type: ignore
                if m:
                    exc_msg = m.group('msg')
                else:
                    exc_msg = None
                example = doctest.Example(  # type: ignore
                    doctest_encode(code[0].code,
                                   self.env.config.source_encoding),
                    output,  # type: ignore  # NOQA
                    exc_msg=exc_msg,
                    lineno=code[0].lineno,
                    options=options)
                test = doctest.DocTest(
                    [example],
                    {},
                    group.name,  # type: ignore
                    code[0].filename,
                    code[0].lineno,
                    None)
                self.type = 'exec'  # multiple statements again
            # DocTest.__init__ copies the globs namespace, which we don't want
            test.globs = ns
            # also don't clear the globs namespace after running the doctest
            self.test_runner.run(test, out=self._warn_out, clear_globs=False)

        # run the cleanup
        run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup')
Example #17
    def test_group(self, group: extdoctest.TestGroup) -> None:
        ns: Mapping = self._make_group_globals(group)

        def run_setup_cleanup(runner, testcodes, what):
            # type: (Any, List[TestCode], Any) -> bool
            examples = []
            for testcode in testcodes:
                example = doctest.Example(testcode.code,
                                          "",
                                          lineno=testcode.lineno)
                examples.append(example)
            if not examples:
                return True
            # simulate a doctest with the code
            sim_doctest = doctest.DocTest(
                examples,
                {},
                "%s (%s code)" % (group.name, what),
                testcodes[0].filename,
                0,
                None,
            )
            sim_doctest.globs = ns
            old_f = runner.failures
            self.type = "exec"  # the snippet may contain multiple statements
            runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
            if runner.failures > old_f:
                return False
            return True

        # run the setup code
        if not run_setup_cleanup(self.setup_runner, group.setup, "setup"):
            # if setup failed, don't run the group
            return

        # run the tests
        for code in group.tests:
            py_code = code[0]
            if len(code) == 1:
                # ordinary doctests (code/output interleaved)
                try:
                    test = extdoctest.parser.get_doctest(
                        py_code.code,
                        {},
                        group.name,  # type: ignore
                        py_code.filename,
                        py_code.lineno,
                    )
                except Exception as ex:
                    log.warning(
                        __("ignoring invalid doctest code: %r\n due to: %s"),
                        py_code.code,
                        ex,
                        location=(py_code.filename, py_code.lineno),
                    )
                    continue

                # HACK: allow collecting vars even if code empty..
                if not test.examples and not self.run_empty_code:
                    continue

                for example in test.examples:
                    # apply directive's comparison options
                    new_opt = py_code.options.copy()
                    new_opt.update(example.options)
                    example.options = new_opt
                self.type = "single"  # as for ordinary doctests
            else:
                # testcode and output separate
                output = code[1] and code[1].code or ""
                options = code[1] and code[1].options or {}
                # disable <BLANKLINE> processing as it is not needed
                options[doctest.DONT_ACCEPT_BLANKLINE] = True
                # find out if we're testing an exception
                m = extdoctest.parser._EXCEPTION_RE.match(
                    output)  # type: ignore
                if m:
                    exc_msg = m.group("msg")
                else:
                    exc_msg = None
                example = doctest.Example(
                    py_code.code,
                    output,
                    exc_msg=exc_msg,
                    lineno=py_code.lineno,
                    options=options,
                )
                test = doctest.DocTest([example], {}, group.name,
                                       py_code.filename, py_code.lineno, None)
                self.type = "exec"  # multiple statements again
            # DocTest.__init__ copies the globs namespace, which we don't want
            test.globs = ns
            # also don't clear the globs namespace after running the doctest
            self.test_runner.run(test, out=self._warn_out, clear_globs=False)

            ## HACK: collect plottable from doctest-runner globals.
            self._globals_updated(py_code, ns)

        # run the cleanup
        run_setup_cleanup(self.cleanup_runner, group.cleanup, "cleanup")