Example #1
class CoverageTest(
        EnvironmentAwareMixin,
        StdStreamCapturingMixin,
        TempDirMixin,
        StopEverythingMixin,
        unittest.TestCase,
):
    """A base class for coverage.py test cases."""

    # Standard unittest setting: show me diffs even if they are very long.
    maxDiff = None

    # Tell newer unittest implementations to print long helpful messages.
    longMessage = True

    # Let stderr go to stderr; pytest will capture it for us.
    show_stderr = True

    # Temp dirs go to $TMPDIR/coverage_test/*
    temp_dir_prefix = "coverage_test/"
    if os.getenv('COVERAGE_ENV_ID'):  # pragma: debugging
        temp_dir_prefix += "{}/".format(os.getenv('COVERAGE_ENV_ID'))

    # Keep the temp directories if the env says to.
    # $set_env.py: COVERAGE_KEEP_TMP - Keep the temp directories made by tests.
    keep_temp_dir = bool(int(os.getenv("COVERAGE_KEEP_TMP", "0")))

    def setUp(self):
        super(CoverageTest, self).setUp()

        self.module_cleaner = SuperModuleCleaner()

        # Attributes for getting info about what happened.
        self.last_command_status = None
        self.last_command_output = None
        self.last_module_name = None

    def clean_local_file_imports(self):
        """Clean up the results of calls to `import_local_file`.

        Use this if you need to `import_local_file` the same file twice in
        one test.

        """
        self.module_cleaner.clean_local_file_imports()

    def start_import_stop(self, cov, modname, modfile=None):
        """Start coverage, import a file, then stop coverage.

        `cov` is started and stopped, with an `import_local_file` of
        `modname` in the middle. `modfile` is the file to import as `modname`
        if it isn't in the current directory.

        The imported module is returned.

        """
        cov.start()
        try:  # pragma: nested
            # Import the Python file, executing it.
            mod = import_local_file(modname, modfile)
        finally:  # pragma: nested
            # Stop coverage.py.
            cov.stop()
        return mod

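    # Illustrative sketch (not from the original suite): how a test method
    # typically uses start_import_stop.  The module name and contents here
    # are hypothetical.
    def _example_start_import_stop(self):
        """Sketch: measure a tiny module and check that it really ran."""
        self.make_file("tiny_example.py", "a = 1\nb = a + 1\n")
        cov = coverage.Coverage()
        mod = self.start_import_stop(cov, "tiny_example")
        assert mod.b == 2               # the module executed under coverage
        assert cov.get_data() is not None
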
    def get_module_name(self):
        """Return a random module name to use for this test run."""
        self.last_module_name = 'coverage_test_' + str(random.random())[2:]
        return self.last_module_name

    def _check_arcs(self, a1, a2, arc_type):
        """Check that the arc lists `a1` and `a2` are equal.

        If they are equal, return empty string. If they are unequal, return
        a string explaining what is different.
        """
        # Make them into multi-line strings so we can see what's going wrong.
        s1 = arcs_to_arcz_repr(a1)
        s2 = arcs_to_arcz_repr(a2)
        if s1 != s2:
            lines1 = s1.splitlines(True)
            lines2 = s2.splitlines(True)
            diff = "".join(difflib.ndiff(lines1, lines2))
            return "\n" + arc_type + " arcs differ: minus is expected, plus is actual\n" + diff
        else:
            return ""

    def check_coverage(
        self,
        text,
        lines=None,
        missing="",
        report="",
        excludes=None,
        partials="",
        arcz=None,
        arcz_missing=None,
        arcz_unpredicted=None,
        arcs=None,
        arcs_missing=None,
        arcs_unpredicted=None,
    ):
        """Check the coverage measurement of `text`.

        The source `text` is run and measured.  `lines` are the line numbers
        that are executable, or a list of possible line numbers, any of which
        could match. `missing` are the lines not executed, `excludes` are
        regexes to match against for excluding lines, and `report` is the text
        of the measurement report.

        For arc measurement, `arcz` is a string that can be decoded into arcs
        in the code (see `arcz_to_arcs` for the encoding scheme).
        `arcz_missing` are the arcs that are not executed, and
        `arcz_unpredicted` are the arcs executed in the code, but not deducible
        from the code.  These last two default to None, which is treated as
        "no arcs", meaning we explicitly check that there are no missing or
        unpredicted arcs.

        Returns the Coverage object, in case you want to poke at it some more.

        """
        # We write the code into a file so that we can import it.
        # Coverage.py wants to deal with things as modules with file names.
        modname = self.get_module_name()

        self.make_file(modname + ".py", text)

        if arcs is None and arcz is not None:
            arcs = arcz_to_arcs(arcz)
        if arcs_missing is None and arcz_missing is not None:
            arcs_missing = arcz_to_arcs(arcz_missing)
        if arcs_unpredicted is None and arcz_unpredicted is not None:
            arcs_unpredicted = arcz_to_arcs(arcz_unpredicted)

        # Start up coverage.py.
        cov = coverage.Coverage(branch=True)
        cov.erase()
        for exc in excludes or []:
            cov.exclude(exc)
        for par in partials or []:
            cov.exclude(par, which='partial')

        mod = self.start_import_stop(cov, modname)

        # Clean up our side effects
        del sys.modules[modname]

        # Get the analysis results, and check that they are right.
        analysis = cov._analyze(mod)
        statements = sorted(analysis.statements)
        if lines is not None:
            if isinstance(lines[0], int):
                # lines is just a list of numbers; it must match the statements
                # found in the code.
                assert statements == lines, "{!r} != {!r}".format(
                    statements, lines)
            else:
                # lines is a list of possible line number lists, one of them
                # must match.
                for line_list in lines:
                    if statements == line_list:
                        break
                else:
                    self.fail("None of the lines choices matched %r" %
                              statements)

            missing_formatted = analysis.missing_formatted()
            if isinstance(missing, string_class):
                msg = "{!r} != {!r}".format(missing_formatted, missing)
                assert missing_formatted == missing, msg
            else:
                for missing_list in missing:
                    if missing_formatted == missing_list:
                        break
                else:
                    self.fail("None of the missing choices matched %r" %
                              missing_formatted)

        if arcs is not None:
            # print("Possible arcs:")
            # print(" expected:", arcs)
            # print(" actual:", analysis.arc_possibilities())
            # print("Executed:")
            # print(" actual:", sorted(set(analysis.arcs_executed())))
            # TODO: this would be nicer with pytest-check, once we can run that.
            msg = (
                self._check_arcs(arcs, analysis.arc_possibilities(),
                                 "Possible") +
                self._check_arcs(arcs_missing, analysis.arcs_missing(),
                                 "Missing") +
                self._check_arcs(arcs_unpredicted, analysis.arcs_unpredicted(),
                                 "Unpredicted"))
            if msg:
                assert False, msg

        if report:
            frep = StringIO()
            cov.report(mod, file=frep, show_missing=True)
            rep = " ".join(frep.getvalue().split("\n")[2].split()[1:])
            assert report == rep, "{!r} != {!r}".format(report, rep)

        return cov

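    # Illustrative sketch (added for exposition): a typical check_coverage
    # call.  The sample program and arc strings are hypothetical, chosen so
    # the untaken false branch (2 -> 4) is the only missing arc.
    def _example_check_coverage(self):
        """Sketch: verify statements, missing lines, and branch arcs."""
        self.check_coverage("""\
            a = 1
            if a == 1:
                b = 2
            c = 3
            """,
            lines=[1, 2, 3, 4],
            missing="",
            arcz=".1 12 23 24 34 4.",
            arcz_missing="24",
        )
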
    @contextlib.contextmanager
    def assert_warnings(self, cov, warnings, not_warnings=()):
        """A context manager to check that particular warnings happened in `cov`.

        `cov` is a Coverage instance.  `warnings` is a list of regexes.  Every
        regex must match a warning that was issued by `cov`.  It is OK for
        extra warnings to be issued by `cov` that are not matched by any regex.
        Warnings that are disabled are still considered issued by this function.

        `not_warnings` is a list of regexes that must not appear in the
        warnings.  This is only checked if there are some positive warnings to
        test for in `warnings`.

        If `warnings` is empty, then `cov` is not allowed to issue any
        warnings.

        """
        saved_warnings = []

        def capture_warning(msg, slug=None, once=False):  # pylint: disable=unused-argument
            """A fake implementation of Coverage._warn, to capture warnings."""
            # NOTE: we don't implement `once`.
            if slug:
                msg = "%s (%s)" % (msg, slug)
            saved_warnings.append(msg)

        original_warn = cov._warn
        cov._warn = capture_warning

        try:
            yield
        except:  # pylint: disable=try-except-raise
            raise
        else:
            if warnings:
                for warning_regex in warnings:
                    for saved in saved_warnings:
                        if re.search(warning_regex, saved):
                            break
                    else:
                        self.fail("Didn't find warning %r in %r" %
                                  (warning_regex, saved_warnings))
                for warning_regex in not_warnings:
                    for saved in saved_warnings:
                        if re.search(warning_regex, saved):
                            self.fail("Found warning %r in %r" %
                                      (warning_regex, saved_warnings))
            else:
                # No warnings expected. Raise if any warnings happened.
                if saved_warnings:
                    self.fail("Unexpected warnings: %r" % (saved_warnings, ))
        finally:
            cov._warn = original_warn

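    # Illustrative sketch: how assert_warnings wraps the code under test.
    # For brevity we trigger the warning by hand; normally the block would
    # run coverage operations that warn on their own.
    def _example_assert_warnings(self):
        """Sketch: insist that a matching warning is issued."""
        cov = coverage.Coverage()
        with self.assert_warnings(cov, ["No data was collected"]):
            cov._warn("No data was collected (no-data-collected)")
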
    def nice_file(self, *fparts):
        """Canonicalize the file name composed of the parts in `fparts`."""
        fname = os.path.join(*fparts)
        return os.path.normcase(os.path.abspath(os.path.realpath(fname)))

    def assert_same_files(self, flist1, flist2):
        """Assert that `flist1` and `flist2` are the same set of file names."""
        flist1_nice = [self.nice_file(f) for f in flist1]
        flist2_nice = [self.nice_file(f) for f in flist2]
        assert_count_equal(flist1_nice, flist2_nice)

    def assert_exists(self, fname):
        """Assert that `fname` is a file that exists."""
        msg = "File %r should exist" % fname
        assert os.path.exists(fname), msg

    def assert_doesnt_exist(self, fname):
        """Assert that `fname` is a file that doesn't exist."""
        msg = "File %r shouldn't exist" % fname
        assert not os.path.exists(fname), msg

    def assert_file_count(self, pattern, count):
        """Assert that there are `count` files matching `pattern`."""
        files = sorted(glob.glob(pattern))
        msg = "There should be {} files matching {!r}, but there are these: {}"
        msg = msg.format(count, pattern, files)
        assert len(files) == count, msg

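    # Illustrative sketch: counting files with a glob pattern.  The file
    # names are hypothetical.
    def _example_assert_file_count(self):
        """Sketch: expect exactly two matching data files."""
        self.make_file("example.data.1", "x")
        self.make_file("example.data.2", "x")
        self.assert_file_count("example.data.*", 2)
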
    def assert_starts_with(self, s, prefix, msg=None):
        """Assert that `s` starts with `prefix`."""
        if not s.startswith(prefix):
            self.fail(msg or ("%r doesn't start with %r" % (s, prefix)))

    def assert_recent_datetime(self, dt, seconds=10, msg=None):
        """Assert that `dt` marks a time at most `seconds` seconds ago."""
        age = datetime.datetime.now() - dt
        assert age.total_seconds() >= 0, msg
        assert age.total_seconds() <= seconds, msg

    def command_line(self, args, ret=OK):
        """Run `args` through the command line.

        Use this when you want to run the full coverage machinery, but in the
        current process.  Exceptions may be thrown from deep in the code.
        Asserts that `ret` is returned by `CoverageScript.command_line`.

        Compare with `run_command`.

        Returns None.

        """
        ret_actual = command_line(args)
        assert ret_actual == ret, "{!r} != {!r}".format(ret_actual, ret)

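    # Illustrative sketch: driving the full command-line machinery
    # in-process.  The argument string is hypothetical.
    def _example_command_line(self):
        """Sketch: "erase" should return the OK status."""
        self.command_line("erase", ret=OK)
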
    # Some distros rename the coverage command, and need a way to indicate
    # their new command name to the tests. This is here for them to override,
    # for example:
    # https://salsa.debian.org/debian/pkg-python-coverage/-/blob/master/debian/patches/02.rename-public-programs.patch
    coverage_command = "coverage"

    def run_command(self, cmd):
        """Run the command-line `cmd` in a sub-process.

        `cmd` is the command line to invoke in a sub-process. Returns the
        combined content of `stdout` and `stderr` output streams from the
        sub-process.

        See `run_command_status` for complete semantics.

        Use this when you need to test the process behavior of coverage.

        Compare with `command_line`.

        """
        _, output = self.run_command_status(cmd)
        return output

    def run_command_status(self, cmd):
        """Run the command-line `cmd` in a sub-process, and print its output.

        Use this when you need to test the process behavior of coverage.

        Compare with `command_line`.

        Handles the following command names specially:

        * "python" is replaced with the command name of the current
            Python interpreter.

        * "coverage" is replaced with the command name for the main
            coverage.py program.

        Returns a pair: the process' exit status and its stdout/stderr text,
        which are also stored as `self.last_command_status` and
        `self.last_command_output`.

        """
        # Make sure "python" and "coverage" mean specifically what we want
        # them to mean.
        split_commandline = cmd.split()
        command_name = split_commandline[0]
        command_args = split_commandline[1:]

        if command_name == "python":
            # Running a Python interpreter in a sub-process can be tricky.
            # Use the real name of our own executable. So "python foo.py" might
            # get executed as "python3.3 foo.py". This is important because
            # Python 3.x doesn't install as "python", so you might get a Python
            # 2 executable instead if you don't use the executable's basename.
            command_words = [os.path.basename(sys.executable)]

        elif command_name == "coverage":
            if env.JYTHON:  # pragma: only jython
                # Jython can't do reporting, so let's skip the test now.
                if command_args and command_args[0] in ('report', 'html',
                                                        'xml', 'annotate'):
                    pytest.skip("Can't run reporting commands in Jython")
                # Jython can't run "coverage" as a command because the shebang
                # refers to another shebang'd Python script. So run them as
                # modules.
                command_words = "jython -m coverage".split()
            else:
                # The invocation requests the coverage.py program.  Substitute the
                # actual coverage.py main command name.
                command_words = [self.coverage_command]

        else:
            command_words = [command_name]

        cmd = " ".join([shlex_quote(w) for w in command_words] + command_args)

        # Add our test modules directory to PYTHONPATH.  I'm sure there's too
        # much path munging here, but...
        pythonpath_name = "PYTHONPATH"
        if env.JYTHON:
            pythonpath_name = "JYTHONPATH"  # pragma: only jython

        testmods = self.nice_file(self.working_root(), 'tests/modules')
        zipfile = self.nice_file(self.working_root(), 'tests/zipmods.zip')
        pypath = os.getenv(pythonpath_name, '')
        if pypath:
            pypath += os.pathsep
        pypath += testmods + os.pathsep + zipfile
        self.set_environ(pythonpath_name, pypath)

        self.last_command_status, self.last_command_output = run_command(cmd)
        print(self.last_command_output)
        return self.last_command_status, self.last_command_output

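    # Illustrative sketch: running coverage.py as a real sub-process.  The
    # file name and expected output are hypothetical.
    def _example_run_command_status(self):
        """Sketch: run a program under coverage and check its output."""
        self.make_file("hello_example.py", "print('hi')\n")
        status, out = self.run_command_status("coverage run hello_example.py")
        assert status == 0
        assert "hi" in out
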
    def working_root(self):
        """Where is the root of the coverage.py working tree?"""
        return os.path.dirname(self.nice_file(coverage.__file__, ".."))

    def report_from_command(self, cmd):
        """Return the report from the `cmd`, with some convenience added."""
        report = self.run_command(cmd).replace('\\', '/')
        assert "error" not in report.lower()
        return report

    def report_lines(self, report):
        """Return the lines of the report, as a list."""
        lines = report.split('\n')
        assert lines[-1] == ""
        return lines[:-1]

    def line_count(self, report):
        """How many lines are in `report`?"""
        return len(self.report_lines(report))

    def squeezed_lines(self, report):
        """Return a list of the lines in report, with the spaces squeezed."""
        lines = self.report_lines(report)
        return [re.sub(r"\s+", " ", line.strip()) for line in lines]

    def last_line_squeezed(self, report):
        """Return the last line of `report` with the spaces squeezed down."""
        return self.squeezed_lines(report)[-1]

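    # Illustrative sketch: the report helpers compose.  The report text is
    # hypothetical but shaped like real "coverage report" output.
    def _example_report_helpers(self):
        """Sketch: squeeze a report's TOTAL line for easy asserting."""
        report = "Name  Stmts  Miss  Cover\n-----\nTOTAL  10  2  80%\n"
        assert self.line_count(report) == 3
        assert self.last_line_squeezed(report) == "TOTAL 10 2 80%"
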
    def get_measured_filenames(self, coverage_data):
        """Get paths to measured files.

        Returns a dict of {filename: absolute path to file}
        for given CoverageData.
        """
        return {
            os.path.basename(filename): filename
            for filename in coverage_data.measured_files()
        }
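
    # Illustrative sketch: mapping measured files by basename.  The module
    # name is hypothetical.
    def _example_get_measured_filenames(self):
        """Sketch: confirm a module was measured."""
        self.make_file("measured_example.py", "x = 1\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "measured_example")
        filenames = self.get_measured_filenames(cov.get_data())
        assert "measured_example.py" in filenames
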
Example #2
class StressTest(object):
    def __init__(self):
        self.module_cleaner = SuperModuleCleaner()

    def _run_scenario(self, file_count, call_count, line_count):
        self.module_cleaner.clean_local_file_imports()

        for idx in range(file_count):
            make_file('test{}.py'.format(idx), TEST_FILE)
        make_file('testmain.py', mk_main(file_count, call_count, line_count))

        # Run it once just to get the disk caches loaded up.
        import_local_file("testmain")
        self.module_cleaner.clean_local_file_imports()

        # Run it to get the baseline time.
        start = time.perf_counter()
        import_local_file("testmain")
        baseline = time.perf_counter() - start
        self.module_cleaner.clean_local_file_imports()

        # Run it to get the covered time.
        start = time.perf_counter()
        cov = coverage.Coverage()
        cov.start()
        try:  # pragma: nested
            # Import the Python file, executing it.
            import_local_file("testmain")
        finally:  # pragma: nested
            # Stop coverage.py.
            covered = time.perf_counter() - start
            stats = cov._collector.tracers[0].get_stats()
            if stats:
                stats = stats.copy()
            cov.stop()

        return baseline, covered, stats

    def _compute_overhead(self, file_count, call_count, line_count):
        baseline, covered, stats = self._run_scenario(file_count, call_count,
                                                      line_count)

        #print("baseline = {:.2f}, covered = {:.2f}".format(baseline, covered))
        # Empirically determined to produce the same numbers as the collected
        # stats from get_stats(), with Python 3.6.
        actual_file_count = 17 + file_count
        actual_call_count = file_count * call_count + 156 * file_count + 85
        actual_line_count = (
            2 * file_count * call_count * line_count +
            3 * file_count * call_count +
            769 * file_count +
            345
        )
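        # Worked example of the model above (ours, for orientation): with
        # file_count=1, call_count=1, line_count=1 it predicts 18 files,
        # 1 + 156 + 85 = 242 calls, and 2 + 3 + 769 + 345 = 1119 lines.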

        if stats is not None:
            assert actual_file_count == stats['files']
            assert actual_call_count == stats['calls']
            assert actual_line_count == stats['lines']
            print("File counts", file_count, actual_file_count, stats['files'])
            print("Call counts", call_count, actual_call_count, stats['calls'])
            print("Line counts", line_count, actual_line_count, stats['lines'])
            print()

        return StressResult(
            actual_file_count,
            actual_call_count,
            actual_line_count,
            baseline,
            covered,
        )

    fixed = 200
    numlo = 100
    numhi = 100
    step = 50
    runs = 5

    def count_operations(self):
        def operations(thing):
            for _ in range(self.runs):
                for n in range(self.numlo, self.numhi + 1, self.step):
                    kwargs = {
                        "file_count": self.fixed,
                        "call_count": self.fixed,
                        "line_count": self.fixed,
                    }
                    kwargs[thing + "_count"] = n
                    yield kwargs['file_count'] * kwargs['call_count'] * kwargs['line_count']

        ops = sum(sum(operations(thing)) for thing in ["file", "call", "line"])
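        # With the settings above (fixed=200, numlo=numhi=100, runs=5), each
        # probe yields 5 * (100 * 200 * 200) = 20M operations, so the three
        # probes together print "60.0M operations".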
        print("{:.1f}M operations".format(ops / 1e6))

    def check_coefficients(self):
        # For checking the calculation of actual stats:
        for f in range(1, 6):
            for c in range(1, 6):
                for l in range(1, 6):
                    _, _, stats = self._run_scenario(f, c, l)
                    print(
                        "{0},{1},{2},{3[files]},{3[calls]},{3[lines]}".format(
                            f, c, l, stats))

    def stress_test(self):
        # For checking the overhead for each component:
        def time_thing(thing):
            per_thing = []
            pct_thing = []
            for _ in range(self.runs):
                for n in range(self.numlo, self.numhi + 1, self.step):
                    kwargs = {
                        "file_count": self.fixed,
                        "call_count": self.fixed,
                        "line_count": self.fixed,
                    }
                    kwargs[thing + "_count"] = n
                    res = self._compute_overhead(**kwargs)
                    per_thing.append(res.overhead / getattr(res, "{}s".format(thing)))
                    pct_thing.append(res.covered / res.baseline * 100)

            out = "Per {}: ".format(thing)
            out += "mean = {:9.3f}us, stddev = {:8.3f}us, ".format(
                statistics.mean(per_thing) * 1e6,
                statistics.stdev(per_thing) * 1e6)
            out += "min = {:9.3f}us, ".format(min(per_thing) * 1e6)
            out += "pct = {:6.1f}%, stddev = {:6.1f}%".format(
                statistics.mean(pct_thing), statistics.stdev(pct_thing))
            print(out)

        time_thing("file")
        time_thing("call")
        time_thing("line")
Example #3
class CoverageTest(
        EnvironmentAwareMixin,
        StdStreamCapturingMixin,
        TempDirMixin,
        DelayedAssertionMixin,
        CoverageTestMethodsMixin,
        TestCase,
):
    """A base class for coverage.py test cases."""

    # Standard unittest setting: show me diffs even if they are very long.
    maxDiff = None

    # Tell newer unittest implementations to print long helpful messages.
    longMessage = True

    # Let stderr go to stderr; pytest will capture it for us.
    show_stderr = True

    def setUp(self):
        super(CoverageTest, self).setUp()

        self.module_cleaner = SuperModuleCleaner()

        # Attributes for getting info about what happened.
        self.last_command_status = None
        self.last_command_output = None
        self.last_module_name = None

        if _TEST_NAME_FILE:  # pragma: debugging
            with open(_TEST_NAME_FILE, "w") as f:
                f.write("%s_%s" % (
                    self.__class__.__name__,
                    self._testMethodName,
                ))

    def clean_local_file_imports(self):
        """Clean up the results of calls to `import_local_file`.

        Use this if you need to `import_local_file` the same file twice in
        one test.

        """
        self.module_cleaner.clean_local_file_imports()

    def start_import_stop(self, cov, modname, modfile=None):
        """Start coverage, import a file, then stop coverage.

        `cov` is started and stopped, with an `import_local_file` of
        `modname` in the middle. `modfile` is the file to import as `modname`
        if it isn't in the current directory.

        The imported module is returned.

        """
        cov.start()
        try:  # pragma: nested
            # Import the Python file, executing it.
            mod = import_local_file(modname, modfile)
        finally:  # pragma: nested
            # Stop coverage.py.
            cov.stop()
        return mod

    def get_module_name(self):
        """Return a random module name to use for this test run."""
        self.last_module_name = 'coverage_test_' + str(random.random())[2:]
        return self.last_module_name

    # Map chars to numbers for arcz_to_arcs
    _arcz_map = {'.': -1}
    _arcz_map.update(dict((c, ord(c) - ord('0')) for c in '123456789'))
    _arcz_map.update(dict(
        (c, 10 + ord(c) - ord('A')) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    ))

    def arcz_to_arcs(self, arcz):
        """Convert a compact textual representation of arcs to a list of pairs.

        The text has space-separated pairs of letters.  Period is -1, 1-9 are
        1-9, A-Z are 10 through 35.  The resulting list is sorted regardless of
        the order of the input pairs.

        ".1 12 2." --> [(-1,1), (1,2), (2,-1)]

        Minus signs can be included in the pairs:

        "-11, 12, 2-5" --> [(-1,1), (1,2), (2,-5)]

        """
        arcs = []
        for pair in arcz.split():
            asgn = bsgn = 1
            if len(pair) == 2:
                a, b = pair
            else:
                assert len(pair) == 3
                if pair[0] == '-':
                    _, a, b = pair
                    asgn = -1
                else:
                    assert pair[1] == '-'
                    a, _, b = pair
                    bsgn = -1
            arcs.append((asgn * self._arcz_map[a], bsgn * self._arcz_map[b]))
        return sorted(arcs)

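    # Illustrative sketch: spot-checking the compact arc encoding by hand.
    # "A" is 10, so "9A" is the arc (9, 10); a leading minus negates.
    def _example_arcz_to_arcs(self):
        """Sketch: decode a few arcz strings."""
        assert self.arcz_to_arcs(".1 12 2.") == [(-1, 1), (1, 2), (2, -1)]
        assert self.arcz_to_arcs("9A") == [(9, 10)]
        assert self.arcz_to_arcs("-22 2.") == [(-2, 2), (2, -1)]
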
    def assert_equal_args(self, a1, a2, msg=None):
        """Assert that the arc lists `a1` and `a2` are equal."""
        # Make them into multi-line strings so we can see what's going wrong.
        s1 = "\n".join(repr(a) for a in a1) + "\n"
        s2 = "\n".join(repr(a) for a in a2) + "\n"
        self.assertMultiLineEqual(s1, s2, msg)

    def check_coverage(
        self,
        text,
        lines=None,
        missing="",
        report="",
        excludes=None,
        partials="",
        arcz=None,
        arcz_missing="",
        arcz_unpredicted="",
        arcs=None,
        arcs_missing=None,
        arcs_unpredicted=None,
    ):
        """Check the coverage measurement of `text`.

        The source `text` is run and measured.  `lines` are the line numbers
        that are executable, or a list of possible line numbers, any of which
        could match. `missing` are the lines not executed, `excludes` are
        regexes to match against for excluding lines, and `report` is the text
        of the measurement report.

        For arc measurement, `arcz` is a string that can be decoded into arcs
        in the code (see `arcz_to_arcs` for the encoding scheme).
        `arcz_missing` are the arcs that are not executed, and
        `arcz_unpredicted` are the arcs executed in the code, but not deducible
        from the code.  These last two default to "", meaning we explicitly
        check that there are no missing or unpredicted arcs.

        Returns the Coverage object, in case you want to poke at it some more.

        """
        # We write the code into a file so that we can import it.
        # Coverage.py wants to deal with things as modules with file names.
        modname = self.get_module_name()

        self.make_file(modname + ".py", text)

        if arcs is None and arcz is not None:
            arcs = self.arcz_to_arcs(arcz)
        if arcs_missing is None:
            arcs_missing = self.arcz_to_arcs(arcz_missing)
        if arcs_unpredicted is None:
            arcs_unpredicted = self.arcz_to_arcs(arcz_unpredicted)

        # Start up coverage.py.
        cov = coverage.Coverage(branch=True)
        cov.erase()
        for exc in excludes or []:
            cov.exclude(exc)
        for par in partials or []:
            cov.exclude(par, which='partial')

        mod = self.start_import_stop(cov, modname)

        # Clean up our side effects
        del sys.modules[modname]

        # Get the analysis results, and check that they are right.
        analysis = cov._analyze(mod)
        statements = sorted(analysis.statements)
        if lines is not None:
            if isinstance(lines[0], int):
                # lines is just a list of numbers; it must match the statements
                # found in the code.
                self.assertEqual(statements, lines)
            else:
                # lines is a list of possible line number lists, one of them
                # must match.
                for line_list in lines:
                    if statements == line_list:
                        break
                else:
                    self.fail("None of the lines choices matched %r" %
                              statements)

            missing_formatted = analysis.missing_formatted()
            if isinstance(missing, string_class):
                self.assertEqual(missing_formatted, missing)
            else:
                for missing_list in missing:
                    if missing_formatted == missing_list:
                        break
                else:
                    self.fail("None of the missing choices matched %r" %
                              missing_formatted)

        if arcs is not None:
            with self.delayed_assertions():
                self.assert_equal_args(
                    analysis.arc_possibilities(), arcs,
                    "Possible arcs differ: minus is actual, plus is expected")

                self.assert_equal_args(
                    analysis.arcs_missing(), arcs_missing,
                    "Missing arcs differ: minus is actual, plus is expected")

                self.assert_equal_args(
                    analysis.arcs_unpredicted(), arcs_unpredicted,
                    "Unpredicted arcs differ: minus is actual, plus is expected"
                )

        if report:
            frep = StringIO()
            cov.report(mod, file=frep, show_missing=True)
            rep = " ".join(frep.getvalue().split("\n")[2].split()[1:])
            self.assertEqual(report, rep)

        return cov

    @contextlib.contextmanager
    def assert_warnings(self, cov, warnings, not_warnings=()):
        """A context manager to check that particular warnings happened in `cov`.

        `cov` is a Coverage instance.  `warnings` is a list of regexes.  Every
        regex must match a warning that was issued by `cov`.  It is OK for
        extra warnings to be issued by `cov` that are not matched by any regex.

        `not_warnings` is a list of regexes that must not appear in the
        warnings.  This is only checked if there are some positive warnings to
        test for in `warnings`.

        If `warnings` is empty, then `cov` is not allowed to issue any
        warnings.

        """
        saved_warnings = []

        def capture_warning(msg, slug=None):  # pylint: disable=unused-argument
            """A fake implementation of Coverage._warn, to capture warnings."""
            if slug:
                msg = "%s (%s)" % (msg, slug)
            saved_warnings.append(msg)

        original_warn = cov._warn
        cov._warn = capture_warning

        try:
            yield
        except:
            raise
        else:
            if warnings:
                for warning_regex in warnings:
                    for saved in saved_warnings:
                        if re.search(warning_regex, saved):
                            break
                    else:
                        self.fail("Didn't find warning %r in %r" %
                                  (warning_regex, saved_warnings))
                for warning_regex in not_warnings:
                    for saved in saved_warnings:
                        if re.search(warning_regex, saved):
                            self.fail("Found warning %r in %r" %
                                      (warning_regex, saved_warnings))
            else:
                # No warnings expected. Raise if any warnings happened.
                if saved_warnings:
                    self.fail("Unexpected warnings: %r" % (saved_warnings, ))
        finally:
            cov._warn = original_warn

    def nice_file(self, *fparts):
        """Canonicalize the file name composed of the parts in `fparts`."""
        fname = os.path.join(*fparts)
        return os.path.normcase(os.path.abspath(os.path.realpath(fname)))

    def assert_same_files(self, flist1, flist2):
        """Assert that `flist1` and `flist2` are the same set of file names."""
        flist1_nice = [self.nice_file(f) for f in flist1]
        flist2_nice = [self.nice_file(f) for f in flist2]
        self.assertCountEqual(flist1_nice, flist2_nice)

    def assert_exists(self, fname):
        """Assert that `fname` is a file that exists."""
        msg = "File %r should exist" % fname
        self.assertTrue(os.path.exists(fname), msg)

    def assert_doesnt_exist(self, fname):
        """Assert that `fname` is a file that doesn't exist."""
        msg = "File %r shouldn't exist" % fname
        self.assertTrue(not os.path.exists(fname), msg)

    def assert_starts_with(self, s, prefix, msg=None):
        """Assert that `s` starts with `prefix`."""
        if not s.startswith(prefix):
            self.fail(msg or ("%r doesn't start with %r" % (s, prefix)))

    def assert_recent_datetime(self, dt, seconds=10, msg=None):
        """Assert that `dt` marks a time at most `seconds` seconds ago."""
        age = datetime.datetime.now() - dt
        # Python 2.6 doesn't have total_seconds :(
        self.assertEqual(age.days, 0, msg)
        self.assertGreaterEqual(age.seconds, 0, msg)
        self.assertLessEqual(age.seconds, seconds, msg)

    def command_line(self, args, ret=OK, _covpkg=None):
        """Run `args` through the command line.

        Use this when you want to run the full coverage machinery, but in the
        current process.  Exceptions may be thrown from deep in the code.
        Asserts that `ret` is returned by `CoverageScript.command_line`.

        Compare with `run_command`.

        Returns None.

        """
        ret_actual = command_line(args, _covpkg=_covpkg)
        self.assertEqual(ret_actual, ret)

    coverage_command = "coverage"

    def run_command(self, cmd):
        """Run the command-line `cmd` in a sub-process.

        `cmd` is the command line to invoke in a sub-process. Returns the
        combined content of `stdout` and `stderr` output streams from the
        sub-process.

        See `run_command_status` for complete semantics.

        Use this when you need to test the process behavior of coverage.

        Compare with `command_line`.

        """
        _, output = self.run_command_status(cmd)
        return output

    def run_command_status(self, cmd):
        """Run the command-line `cmd` in a sub-process, and print its output.

        Use this when you need to test the process behavior of coverage.

        Compare with `command_line`.

        Handles the following command names specially:

        * "python" is replaced with the command name of the current
            Python interpreter.

        * "coverage" is replaced with the command name for the main
            coverage.py program.

        Returns a pair: the process' exit status and its stdout/stderr text,
        which are also stored as `self.last_command_status` and
        `self.last_command_output`.

        """
        # Make sure "python" and "coverage" mean specifically what we want
        # them to mean.
        split_commandline = cmd.split()
        command_name = split_commandline[0]
        command_args = split_commandline[1:]

        if command_name == "python":
            # Running a Python interpreter in a sub-process can be tricky.
            # Use the real name of our own executable. So "python foo.py" might
            # get executed as "python3.3 foo.py". This is important because
            # Python 3.x doesn't install as "python", so you might get a Python
            # 2 executable instead if you don't use the executable's basename.
            command_words = [os.path.basename(sys.executable)]

        elif command_name == "coverage":
            if env.JYTHON:  # pragma: only jython
                # Jython can't do reporting, so let's skip the test now.
                if command_args and command_args[0] in ('report', 'html',
                                                        'xml', 'annotate'):
                    self.skipTest("Can't run reporting commands in Jython")
                # Jython can't run "coverage" as a command because the shebang
                # refers to another shebang'd Python script. So run them as
                # modules.
                command_words = "jython -m coverage".split()
            else:
                # The invocation requests the coverage.py program.  Substitute the
                # actual coverage.py main command name.
                command_words = [self.coverage_command]

        else:
            command_words = [command_name]

        cmd = " ".join([shlex_quote(w) for w in command_words] + command_args)

        # Add our test modules directory to PYTHONPATH.  I'm sure there's too
        # much path munging here, but...
        pythonpath_name = "PYTHONPATH"
        if env.JYTHON:
            pythonpath_name = "JYTHONPATH"  # pragma: only jython

        testmods = self.nice_file(self.working_root(), 'tests/modules')
        zipfile = self.nice_file(self.working_root(), 'tests/zipmods.zip')
        pypath = os.getenv(pythonpath_name, '')
        if pypath:
            pypath += os.pathsep
        pypath += testmods + os.pathsep + zipfile
        self.set_environ(pythonpath_name, pypath)

        self.last_command_status, self.last_command_output = run_command(cmd)
        print(self.last_command_output)
        return self.last_command_status, self.last_command_output

    def working_root(self):
        """Where is the root of the coverage.py working tree?"""
        return os.path.dirname(self.nice_file(coverage.__file__, ".."))

    def report_from_command(self, cmd):
        """Return the report from the `cmd`, with some convenience added."""
        report = self.run_command(cmd).replace('\\', '/')
        self.assertNotIn("error", report.lower())
        return report

    def report_lines(self, report):
        """Return the lines of the report, as a list."""
        lines = report.split('\n')
        self.assertEqual(lines[-1], "")
        return lines[:-1]

    def line_count(self, report):
        """How many lines are in `report`?"""
        return len(self.report_lines(report))

    def squeezed_lines(self, report):
        """Return a list of the lines in report, with the spaces squeezed."""
        lines = self.report_lines(report)
        return [re.sub(r"\s+", " ", line.strip()) for line in lines]

    def last_line_squeezed(self, report):
        """Return the last line of `report` with the spaces squeezed down."""
        return self.squeezed_lines(report)[-1]
Example #4
class StressTest(object):

    def __init__(self):
        self.module_cleaner = SuperModuleCleaner()

    def _run_scenario(self, file_count, call_count, line_count):
        self.module_cleaner.clean_local_file_imports()

        for idx in range(file_count):
            make_file('test{}.py'.format(idx), TEST_FILE)
        make_file('testmain.py', mk_main(file_count, call_count, line_count))

        # Run it once just to get the disk caches loaded up.
        import_local_file("testmain")
        self.module_cleaner.clean_local_file_imports()

        # Run it to get the baseline time.
        start = time.perf_counter()
        import_local_file("testmain")
        baseline = time.perf_counter() - start
        self.module_cleaner.clean_local_file_imports()

        # Run it to get the covered time.
        start = time.perf_counter()
        cov = coverage.Coverage()
        cov.start()
        try:                                    # pragma: nested
            # Import the Python file, executing it.
            import_local_file("testmain")
        finally:                                # pragma: nested
            # Stop coverage.py.
            covered = time.perf_counter() - start
            stats = cov._collector.tracers[0].get_stats()
            if stats:
                stats = stats.copy()
            cov.stop()

        return baseline, covered, stats

    def _compute_overhead(self, file_count, call_count, line_count):
        baseline, covered, stats = self._run_scenario(file_count, call_count, line_count)

        #print("baseline = {:.2f}, covered = {:.2f}".format(baseline, covered))
        # Empirically determined to produce the same numbers as the collected
        # stats from get_stats(), with Python 3.6.
        actual_file_count = 17 + file_count
        actual_call_count = file_count * call_count + 156 * file_count + 85
        actual_line_count = (
            2 * file_count * call_count * line_count +
            3 * file_count * call_count +
            769 * file_count +
            345
        )

        if stats is not None:
            assert actual_file_count == stats['files']
            assert actual_call_count == stats['calls']
            assert actual_line_count == stats['lines']
            print("File counts", file_count, actual_file_count, stats['files'])
            print("Call counts", call_count, actual_call_count, stats['calls'])
            print("Line counts", line_count, actual_line_count, stats['lines'])
            print()

        return StressResult(
            actual_file_count,
            actual_call_count,
            actual_line_count,
            baseline,
            covered,
        )

    fixed = 200
    numlo = 100
    numhi = 100
    step = 50
    runs = 5

    def count_operations(self):

        def operations(thing):
            for _ in range(self.runs):
                for n in range(self.numlo, self.numhi+1, self.step):
                    kwargs = {
                        "file_count": self.fixed,
                        "call_count": self.fixed,
                        "line_count": self.fixed,
                    }
                    kwargs[thing+"_count"] = n
                    yield kwargs['file_count'] * kwargs['call_count'] * kwargs['line_count']

        ops = sum(sum(operations(thing)) for thing in ["file", "call", "line"])
        print("{0:.1f}M operations".format(ops/1e6))

    def check_coefficients(self):
        # For checking the calculation of actual stats:
        for f in range(1, 6):
            for c in range(1, 6):
                for l in range(1, 6):
                    _, _, stats = self._run_scenario(f, c, l)
                    print("{0},{1},{2},{3[files]},{3[calls]},{3[lines]}".format(f, c, l, stats))

    def stress_test(self):
        # For checking the overhead for each component:
        def time_thing(thing):
            per_thing = []
            pct_thing = []
            for _ in range(self.runs):
                for n in range(self.numlo, self.numhi+1, self.step):
                    kwargs = {
                        "file_count": self.fixed,
                        "call_count": self.fixed,
                        "line_count": self.fixed,
                    }
                    kwargs[thing+"_count"] = n
                    res = self._compute_overhead(**kwargs)
                    per_thing.append(res.overhead / getattr(res, "{}s".format(thing)))
                    pct_thing.append(res.covered / res.baseline * 100)

            out = "Per {}: ".format(thing)
            out += "mean = {:9.3f}us, stddev = {:8.3f}us, ".format(
                statistics.mean(per_thing)*1e6, statistics.stdev(per_thing)*1e6
            )
            out += "min = {:9.3f}us, ".format(min(per_thing)*1e6)
            out += "pct = {:6.1f}%, stddev = {:6.1f}%".format(
                statistics.mean(pct_thing), statistics.stdev(pct_thing)
            )
            print(out)

        time_thing("file")
        time_thing("call")
        time_thing("line")