Example #1
 def test_simple(self):
     self.make_file("hello.py")
     fl = FileLocator()
     self.assertEqual(fl.relative_filename("hello.py"), "hello.py")
     a = self.abs_path("hello.py")
     self.assertNotEqual(a, "hello.py")
     self.assertEqual(fl.relative_filename(a), "hello.py")
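
The test above captures the two behaviors of FileLocator.relative_filename(): paths under the current directory come back relative to it, anything else is returned unchanged. Below is a minimal standalone sketch of the same calls outside the test harness, assuming FileLocator is importable from coverage.files as in the coverage.py 3.x series (the import path is an assumption; none of the examples show it):

import os.path

from coverage.files import FileLocator   # assumed import path (coverage 3.x)

fl = FileLocator()   # relative paths are resolved against the directory current at construction

absolute = os.path.abspath("hello.py")
print(fl.relative_filename(absolute))     # -> "hello.py"

# A path outside the current directory is returned unchanged
# (compare the peer-directories test further down).
print(fl.relative_filename("/somewhere/else/hello.py"))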
Example #2
 def test_comparison(self):
     acu = code_unit_factory("aa/afile.py", FileLocator())[0]
     acu2 = code_unit_factory("aa/afile.py", FileLocator())[0]
     zcu = code_unit_factory("aa/zfile.py", FileLocator())[0]
     bcu = code_unit_factory("aa/bb/bfile.py", FileLocator())[0]
     assert acu == acu2 and acu <= acu2 and acu >= acu2
     assert acu < zcu and acu <= zcu and acu != zcu
     assert zcu > acu and zcu >= acu and zcu != acu
     assert acu < bcu and acu <= bcu and acu != bcu
     assert bcu > acu and bcu >= acu and bcu != acu
Example #3
 def test_peer_directories(self):
     self.make_file("sub/proj1/file1.py")
     self.make_file("sub/proj2/file2.py")
     a1 = self.abs_path("sub/proj1/file1.py")
     a2 = self.abs_path("sub/proj2/file2.py")
     d = os.path.normpath("sub/proj1")
     os.chdir(d)
     fl = FileLocator()
     self.assertEqual(fl.relative_filename(a1), "file1.py")
     self.assertEqual(fl.relative_filename(a2), a2)
Example #4
 def test_filepath_contains_absolute_prefix_twice(self):
     # https://bitbucket.org/ned/coveragepy/issue/194
     # Build a path that has two pieces matching the absolute path prefix.
     # Technically, this test doesn't do that on Windows, but drive
     # letters make that impractical to achieve.
     fl = FileLocator()
     d = abs_file(os.curdir)
     trick = os.path.splitdrive(d)[1].lstrip(os.path.sep)
     rel = os.path.join('sub', trick, 'file1.py')
     self.assertEqual(fl.relative_filename(abs_file(rel)), rel)
Example #5
 def test_odd_filenames(self):
     acu = code_unit_factory("aa/afile.odd.py", FileLocator())
     bcu = code_unit_factory("aa/bb/bfile.odd.py", FileLocator())
     b2cu = code_unit_factory("aa/bb.odd/bfile.py", FileLocator())
     self.assertEqual(acu[0].name, "aa/afile.odd")
     self.assertEqual(bcu[0].name, "aa/bb/bfile.odd")
     self.assertEqual(b2cu[0].name, "aa/bb.odd/bfile")
     self.assertEqual(acu[0].flat_rootname(), "aa_afile_odd")
     self.assertEqual(bcu[0].flat_rootname(), "aa_bb_bfile_odd")
     self.assertEqual(b2cu[0].flat_rootname(), "aa_bb_odd_bfile")
     self.assertEqual(acu[0].source_file().read(), "# afile.odd.py\n")
     self.assertEqual(bcu[0].source_file().read(), "# bfile.odd.py\n")
     self.assertEqual(b2cu[0].source_file().read(), "# bfile.py\n")
Example #6
 def test_filenames(self):
     acu = code_unit_factory("aa/afile.py", FileLocator())
     bcu = code_unit_factory("aa/bb/bfile.py", FileLocator())
     ccu = code_unit_factory("aa/bb/cc/cfile.py", FileLocator())
     self.assertEqual(acu[0].name, "aa/afile")
     self.assertEqual(bcu[0].name, "aa/bb/bfile")
     self.assertEqual(ccu[0].name, "aa/bb/cc/cfile")
     self.assertEqual(acu[0].flat_rootname(), "aa_afile")
     self.assertEqual(bcu[0].flat_rootname(), "aa_bb_bfile")
     self.assertEqual(ccu[0].flat_rootname(), "aa_bb_cc_cfile")
     self.assertEqual(acu[0].source_file().read(), "# afile.py\n")
     self.assertEqual(bcu[0].source_file().read(), "# bfile.py\n")
     self.assertEqual(ccu[0].source_file().read(), "# cfile.py\n")
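
The two tests above drive code_unit_factory(), which turns file paths (or modules) into CodeUnit objects exposing name, flat_rootname() and source_file(). A hedged sketch of the same calls outside the test harness, assuming the coverage.py 3.x layout in which code_unit_factory lives in coverage.codeunit and FileLocator in coverage.files:

from coverage.codeunit import code_unit_factory   # assumed import paths
from coverage.files import FileLocator            # (coverage 3.x layout)

units = code_unit_factory("aa/bb/bfile.py", FileLocator())
cu = units[0]
print(cu.name)              # "aa/bb/bfile"  (relative path, ".py" dropped)
print(cu.flat_rootname())   # "aa_bb_bfile"  (separators and dots become underscores)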
Example #7
 def test_fnmatch_matcher(self):
     file1 = self.make_file("sub/file1.py")
     file2 = self.make_file("sub/file2.c")
     file3 = self.make_file("sub2/file3.h")
     file4 = self.make_file("sub3/file4.py")
     file5 = self.make_file("sub3/file5.c")
     fl = FileLocator()
     fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
     self.assertTrue(fnm.match(fl.canonical_filename(file1)))
     self.assertFalse(fnm.match(fl.canonical_filename(file2)))
     self.assertTrue(fnm.match(fl.canonical_filename(file3)))
     self.assertTrue(fnm.match(fl.canonical_filename(file4)))
     self.assertFalse(fnm.match(fl.canonical_filename(file5)))
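
FnmatchMatcher does shell-style pattern matching against canonical filenames, which is why every path in the test above goes through fl.canonical_filename() first. A standalone sketch under the same coverage.py 3.x import assumption (both classes in coverage.files):

from coverage.files import FileLocator, FnmatchMatcher   # assumed import path

fl = FileLocator()
fnm = FnmatchMatcher(["*.py", "*/sub2/*"])

print(fnm.match(fl.canonical_filename("sub/file1.py")))   # True  (matches *.py)
print(fnm.match(fl.canonical_filename("sub/file2.c")))    # False
print(fnm.match(fl.canonical_filename("sub2/file3.h")))   # True  (matches */sub2/*)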
Example #8
 def setUp(self):
     coverage = coveralls(data_file=Arguments.data_file,
                          config_file=Arguments.config_file)
     coverage.load()
     self.reporter = CoverallsReporter(coverage, coverage.config)
     self.reporter.find_code_units(None)
     self.reporter.code_units.append(CodeUnit('LICENSE', FileLocator()))
Example #9
class MatcherTest(CoverageTest):
    """Tests of file matchers."""

    def setUp(self):
        super(MatcherTest, self).setUp()
        self.fl = FileLocator()

    def assertMatches(self, matcher, filepath, matches):
        """The `matcher` should agree with `matches` about `filepath`."""
        canonical = self.fl.canonical_filename(filepath)
        self.assertEqual(
            matcher.match(canonical), matches,
            "File %s should have matched as %s" % (filepath, matches)
        )

    def test_tree_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), True),
            (self.make_file("sub2/file3.h"), False),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fl = FileLocator()
        trees = [
            fl.canonical_filename("sub"),
            fl.canonical_filename("sub3/file4.py"),
            ]
        tm = TreeMatcher(trees)
        self.assertEqual(tm.info(), trees)
        for filepath, matches in matches_to_try:
            self.assertMatches(tm, filepath, matches)

    def test_fnmatch_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), False),
            (self.make_file("sub2/file3.h"), True),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
        self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
        for filepath, matches in matches_to_try:
            self.assertMatches(fnm, filepath, matches)

    def test_fnmatch_matcher_overload(self):
        fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)])
        self.assertMatches(fnm, "x007foo.txt", True)
        self.assertMatches(fnm, "x123foo.txt", True)
        self.assertMatches(fnm, "x798bar.txt", False)

    def test_fnmatch_windows_paths(self):
        # We should be able to match Windows paths even if we are running on
        # a non-Windows OS.
        fnm = FnmatchMatcher(["*/foo.py"])
        self.assertMatches(fnm, r"dir\foo.py", True)
        fnm = FnmatchMatcher([r"*\foo.py"])
        self.assertMatches(fnm, r"dir\foo.py", True)
Example #10
 def test_tree_matcher(self):
     matches_to_try = [
         (self.make_file("sub/file1.py"), True),
         (self.make_file("sub/file2.c"), True),
         (self.make_file("sub2/file3.h"), False),
         (self.make_file("sub3/file4.py"), True),
         (self.make_file("sub3/file5.c"), False),
     ]
     fl = FileLocator()
     trees = [
         fl.canonical_filename("sub"),
         fl.canonical_filename("sub3/file4.py"),
         ]
     tm = TreeMatcher(trees)
     self.assertEqual(tm.info(), trees)
     for filepath, matches in matches_to_try:
         self.assertMatches(tm, filepath, matches)
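
TreeMatcher is the counterpart for directory trees: a canonical filename matches if it lies under one of the listed directories or equals one of the listed files, and info() simply echoes the configured list. A sketch with the same coverage.py 3.x import assumption:

from coverage.files import FileLocator, TreeMatcher   # assumed import path

fl = FileLocator()
tm = TreeMatcher([
    fl.canonical_filename("sub"),             # everything under sub/
    fl.canonical_filename("sub3/file4.py"),   # plus this one file
])

print(tm.info())                                         # the list passed in
print(tm.match(fl.canonical_filename("sub/file2.c")))    # True
print(tm.match(fl.canonical_filename("sub3/file5.c")))   # False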
Example #11
    def test_egg(self):
        # Test that we can get files out of eggs, and read their source files.
        # The egg1 module is installed by an action in igor.py.
        import egg1, egg1.egg1
        # Verify that we really imported from an egg.  If we did, then the
        # __file__ won't be an actual file, because one of the "directories"
        # in the path is actually the .egg zip file.
        self.assert_doesnt_exist(egg1.__file__)

        cu = code_unit_factory([egg1, egg1.egg1], FileLocator())
        self.assertEqual(cu[0].source(), "")
        self.assertEqual(cu[1].source().split("\n")[0], "# My egg file!")
Example #12
    def __init__(self, filename, file_locator=None):
        self.file_locator = file_locator or FileLocator()

        # TODO: do we want the .filename attribute to be part of the public
        # API of the coverage plugin?
        self.filename = self.file_locator.canonical_filename(filename)

        # TODO: is self.name required? Can the base class provide it somehow?
        self.name = os.path.relpath(filename)
        # TODO: html filenames are absolute.

        self._source = None
Example #13
 def test_modules(self):
     import aa, aa.bb, aa.bb.cc
     cu = code_unit_factory([aa, aa.bb, aa.bb.cc], FileLocator())
     self.assertEqual(cu[0].name, "aa")
     self.assertEqual(cu[1].name, "aa.bb")
     self.assertEqual(cu[2].name, "aa.bb.cc")
     self.assertEqual(cu[0].flat_rootname(), "aa")
     self.assertEqual(cu[1].flat_rootname(), "aa_bb")
     self.assertEqual(cu[2].flat_rootname(), "aa_bb_cc")
     self.assertEqual(cu[0].source_file().read(), "# aa\n")
     self.assertEqual(cu[1].source_file().read(), "# bb\n")
     self.assertEqual(cu[2].source_file().read(), "")  # yes, empty
Example #14
 def test_module_files(self):
     import aa.afile, aa.bb.bfile, aa.bb.cc.cfile
     cu = code_unit_factory([aa.afile, aa.bb.bfile, aa.bb.cc.cfile],
                            FileLocator())
     self.assertEqual(cu[0].name, "aa.afile")
     self.assertEqual(cu[1].name, "aa.bb.bfile")
     self.assertEqual(cu[2].name, "aa.bb.cc.cfile")
     self.assertEqual(cu[0].flat_rootname(), "aa_afile")
     self.assertEqual(cu[1].flat_rootname(), "aa_bb_bfile")
     self.assertEqual(cu[2].flat_rootname(), "aa_bb_cc_cfile")
     self.assertEqual(cu[0].source_file().read(), "# afile.py\n")
     self.assertEqual(cu[1].source_file().read(), "# bfile.py\n")
     self.assertEqual(cu[2].source_file().read(), "# cfile.py\n")
Example #15
class MatcherTest(CoverageTest):
    """Tests of file matchers."""
    def setUp(self):
        super(MatcherTest, self).setUp()
        self.fl = FileLocator()

    def assertMatches(self, matcher, filepath, matches):
        """The `matcher` should agree with `matches` about `filepath`."""
        canonical = self.fl.canonical_filename(filepath)
        self.assertEqual(
            matcher.match(canonical), matches,
            "File %s should have matched as %s" % (filepath, matches))

    def test_tree_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), True),
            (self.make_file("sub2/file3.h"), False),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fl = FileLocator()
        trees = [
            fl.canonical_filename("sub"),
            fl.canonical_filename("sub3/file4.py"),
        ]
        tm = TreeMatcher(trees)
        self.assertEqual(tm.info(), trees)
        for filepath, matches in matches_to_try:
            self.assertMatches(tm, filepath, matches)

    def test_fnmatch_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), False),
            (self.make_file("sub2/file3.h"), True),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
        self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
        for filepath, matches in matches_to_try:
            self.assertMatches(fnm, filepath, matches)

    def test_fnmatch_matcher_overload(self):
        fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)])
        self.assertMatches(fnm, "x007foo.txt", True)
        self.assertMatches(fnm, "x123foo.txt", True)
        self.assertMatches(fnm, "x798bar.txt", False)
Example #16
    def __init__(self, morf, file_locator=None):
        self.file_locator = file_locator or FileLocator()

        if hasattr(morf, '__file__'):
            filename = morf.__file__
        else:
            filename = morf
        filename = self._adjust_filename(filename)
        self.filename = self.file_locator.canonical_filename(filename)

        if hasattr(morf, '__name__'):
            name = morf.__name__
            name = name.replace(".", os.sep) + ".py"
        else:
            name = self.file_locator.relative_filename(filename)
        self.name = name
Example #17
 def test_tree_matcher(self):
     file1 = self.make_file("sub/file1.py")
     file2 = self.make_file("sub/file2.c")
     file3 = self.make_file("sub2/file3.h")
     file4 = self.make_file("sub3/file4.py")
     file5 = self.make_file("sub3/file5.c")
     fl = FileLocator()
     tm = TreeMatcher([
         fl.canonical_filename("sub"),
         fl.canonical_filename(file4),
     ])
     self.assertTrue(tm.match(fl.canonical_filename(file1)))
     self.assertTrue(tm.match(fl.canonical_filename(file2)))
     self.assertFalse(tm.match(fl.canonical_filename(file3)))
     self.assertTrue(tm.match(fl.canonical_filename(file4)))
     self.assertFalse(tm.match(fl.canonical_filename(file5)))
Example #18
class MatcherTest(CoverageTest):
    """Tests of file matchers."""

    def setUp(self):
        super(MatcherTest, self).setUp()
        self.fl = FileLocator()

    def assertMatches(self, matcher, filepath, matches):
        """The `matcher` should agree with `matches` about `filepath`."""
        canonical = self.fl.canonical_filename(filepath)
        self.assertEqual(
            matcher.match(canonical), matches,
            "File %s should have matched as %s" % (filepath, matches)
        )

    def test_tree_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), True),
            (self.make_file("sub2/file3.h"), False),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fl = FileLocator()
        trees = [
            fl.canonical_filename("sub"),
            fl.canonical_filename("sub3/file4.py"),
            ]
        tm = TreeMatcher(trees)
        self.assertEqual(tm.info(), trees)
        for filepath, matches in matches_to_try:
            self.assertMatches(tm, filepath, matches)

    def test_fnmatch_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), False),
            (self.make_file("sub2/file3.h"), True),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
        self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
        for filepath, matches in matches_to_try:
            self.assertMatches(fnm, filepath, matches)
Example #19
    def __init__(self, data_file=None, data_suffix=False, cover_pylib=False,
                auto_data=False, timid=False, branch=False):
        """        
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended to `data_file` to create the
        final file name.  If `data_suffix` is simply True, then a suffix is
        created with the machine and process identity included.
        
        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.
        
        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.
        
        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.
        
        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        """
        from coverage import __version__
        
        self.cover_pylib = cover_pylib
        self.auto_data = auto_data
        self.atexit_registered = False

        self.exclude_re = ""
        self.exclude_list = []
        
        self.file_locator = FileLocator()
        
        # Timidity: for nose users, read an environment variable.  This is a
        # cheap hack, since the rest of the command line arguments aren't
        # recognized, but it solves some users' problems.
        timid = timid or ('--timid' in os.environ.get('COVERAGE_OPTIONS', ''))
        self.collector = Collector(
            self._should_trace, timid=timid, branch=branch
            )

        # Create the data file.
        if data_suffix:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid
                data_suffix = ".%s.%s" % (socket.gethostname(), os.getpid())
        else:
            data_suffix = None

        self.data = CoverageData(
            basename=data_file, suffix=data_suffix,
            collector="coverage v%s" % __version__
            )

        # The default exclude pattern.
        self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')

        # The prefix for files considered "installed with the interpreter".
        if not self.cover_pylib:
            # Look at where the "os" module is located.  That's the indication
            # for "installed with the interpreter".
            os_file = self.file_locator.canonical_filename(os.__file__)
            self.pylib_prefix = os.path.split(os_file)[0]

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        here = self.file_locator.canonical_filename(__file__)
        self.cover_prefix = os.path.split(here)[0]
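
The docstring above spells out the constructor arguments; the sketch below puts them together in a typical measurement run. It assumes the pre-4.0 coverage.coverage API that this constructor belongs to (the start/stop/save calls appear in the fuller listings in later examples):

from coverage import coverage   # pre-4.0 API, as documented above

cov = coverage(
    data_file=".coverage",   # base name of the data file (the default)
    data_suffix=True,        # True -> append .machinename.pid to the name
    branch=True,             # measure branch coverage as well as statements
)
cov.start()
# ... run the code to be measured ...
cov.stop()
cov.save()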
Example #20
 def setUp(self):
     super(MatcherTest, self).setUp()
     self.fl = FileLocator()
Example #21
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None,
                 debug=None,
                 debug_file=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.
        
        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.
        
        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.
        
        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.
        
        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.
        
        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.
        
        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.
        
        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.
        
        `debug` is a list of strings indicating what debugging information is
        desired. `debug_file` is the file to write debug messages to,
        defaulting to stderr.
        
        """
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include,
                              debug=debug)
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
        self.data = CoverageData(basename=self.config.data_file,
                                 collector='coverage v%s' % __version__,
                                 debug=self.debug)
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        self.cover_dir = self._canonical_dir(__file__)
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)
Example #22
class coverage(object):
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None,
                 debug=None,
                 debug_file=None):
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include,
                              debug=debug)
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
        self.data = CoverageData(basename=self.config.data_file,
                                 collector='coverage v%s' % __version__,
                                 debug=self.debug)
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        self.cover_dir = self._canonical_dir(__file__)
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)

    def _canonical_dir(self, morf):
        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]

    def _source_for_file(self, filename):
        if not filename.endswith('.py'):
            if filename[-4:-1] == '.py':
                filename = filename[:-1]
            elif filename.endswith('$py.class'):
                filename = filename[:-9] + '.py'
        return filename

    def _should_trace_with_reason(self, filename, frame):
        if not filename:
            return (None, "empty string isn't a filename")
        if filename.startswith('<'):
            return (None, 'not a real filename')
        self._check_for_packages()
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)
        if filename.endswith('$py.class'):
            filename = filename[:-9] + '.py'
        canonical = self.file_locator.canonical_filename(filename)
        if self.source_match:
            if not self.source_match.match(canonical):
                return (None, 'falls outside the --source trees')
        elif self.include_match:
            if not self.include_match.match(canonical):
                return (None, 'falls outside the --include trees')
        else:
            if self.pylib_match and self.pylib_match.match(canonical):
                return (None, 'is in the stdlib')
            if self.cover_match and self.cover_match.match(canonical):
                return (None, 'is part of coverage.py')
        if self.omit_match and self.omit_match.match(canonical):
            return (None, 'is inside an --omit pattern')
        return (canonical, 'because we love you')

    def _should_trace(self, filename, frame):
        canonical, reason = self._should_trace_with_reason(filename, frame)
        if self.debug.should('trace'):
            if not canonical:
                msg = 'Not tracing %r: %s' % (filename, reason)
            else:
                msg = 'Tracing %r' % (filename, )
            self.debug.write(msg)
        return canonical

    def _warn(self, msg):
        self._warnings.append(msg)
        sys.stderr.write('Coverage.py warning: %s\n' % msg)

    def _check_for_packages(self):
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)
                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    pkg_file = None
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__'):
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    if not os.path.exists(pkg_file):
                        pkg_file = None

                if pkg_file:
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)
                else:
                    self._warn('Module %s has no Python source.' % pkg)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        self.data.usefile(usecache)

    def load(self):
        self.collector.reset()
        self.data.read()

    def start(self):
        if self.run_suffix:
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)
        if self.debug.should('config'):
            self.debug.write('Configuration values:')
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info(config_info)
        if self.debug.should('sys'):
            self.debug.write('Debugging info:')
            self.debug.write_formatted_info(self.sysinfo())
        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        self._started = False
        self.collector.stop()

    def _atexit(self):
        if self._started:
            self.stop()
        if self.auto_data:
            self.save()

    def erase(self):
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        setattr(self.config, which + '_list', [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        excl_list = getattr(self.config, which + '_list')
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + '_list')
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        return getattr(self.config, which + '_list')

    def save(self):
        data_suffix = self.data_suffix
        if data_suffix is True:
            extra = ''
            if _TEST_NAME_FILE:
                f = open(_TEST_NAME_FILE)
                test_name = f.read()
                f.close()
                extra = '.' + test_name
            data_suffix = '%s%s.%s.%06d' % (socket.gethostname(), extra,
                                            os.getpid(),
                                            random.randint(0, 999999))
        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)

        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        if not self._measured:
            return
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                self._warn('Module %s was never imported.' % pkg)

        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn('No data was collected.')
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)
                if self.omit_match and self.omit_match.match(py_file):
                    continue
                self.data.touch_file(py_file)

        self._measured = False

    def analysis(self, morf):
        f, s, _, m, mf = self.analysis2(morf)
        return (f, s, m, mf)

    def analysis2(self, morf):
        analysis = self._analyze(morf)
        return (analysis.filename, analysis.statements, analysis.excluded,
                analysis.missing, analysis.missing_formatted())

    def _analyze(self, it):
        self._harvest_data()
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]
        return Analysis(self, it)

    def report(self,
               morfs=None,
               show_missing=True,
               ignore_errors=None,
               file=None,
               omit=None,
               include=None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              show_missing=show_missing)
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(self,
                 morfs=None,
                 directory=None,
                 ignore_errors=None,
                 omit=None,
                 include=None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self,
                    morfs=None,
                    directory=None,
                    ignore_errors=None,
                    omit=None,
                    include=None,
                    extra_css=None,
                    title=None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              html_dir=directory,
                              extra_css=extra_css,
                              html_title=title)
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(self,
                   morfs=None,
                   outfile=None,
                   ignore_errors=None,
                   omit=None,
                   include=None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              xml_output=outfile)
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, 'w')
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sysinfo(self):
        import coverage as covmod
        import platform, re
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = 'unknown'

        info = [('version', covmod.__version__), ('coverage', covmod.__file__),
                ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs),
                ('tracer', self.collector.tracer_name()),
                ('config_files', self.config.attempted_config_files),
                ('configs_read', self.config.config_files),
                ('data_path', self.data.filename),
                ('python', sys.version.replace('\n', '')),
                ('platform', platform.platform()),
                ('implementation', implementation),
                ('executable', sys.executable), ('cwd', os.getcwd()),
                ('path', sys.path),
                ('environment',
                 sorted([
                     '%s = %s' % (k, v) for k, v in iitems(os.environ)
                     if re.search('^COV|^PY', k)
                 ])), ('command_line', ' '.join(getattr(sys, 'argv',
                                                        ['???'])))]
        if self.source_match:
            info.append(('source_match', self.source_match.info()))
        if self.include_match:
            info.append(('include_match', self.include_match.info()))
        if self.omit_match:
            info.append(('omit_match', self.omit_match.info()))
        if self.cover_match:
            info.append(('cover_match', self.cover_match.info()))
        if self.pylib_match:
            info.append(('pylib_match', self.pylib_match.info()))
        return info
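
Beyond measurement, the class above exposes the reporting entry points (report, html_report, xml_report), each accepting morfs plus omit/include patterns. A hedged end-to-end sketch against that API; the source and omit values are illustrative only:

from coverage import coverage   # same pre-4.x API assumption as above

cov = coverage(source=["myproj"], omit=["*/tests/*"])   # hypothetical project layout
cov.start()
# ... exercise the code under test ...
cov.stop()
cov.save()

cov.report(show_missing=True)            # text summary, written to stdout
cov.html_report(directory="covhtml")     # HTML report in covhtml/
cov.xml_report(outfile="coverage.xml")   # Cobertura-compatible XML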
Example #23
class coverage(object):
    """Programmatic access to Coverage.

    To use::
    
        from coverage import coverage
        
        cov = coverage()
        cov.start()
        #.. blah blah (run your code) blah blah ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """

    def __init__(self, data_file=None, data_suffix=False, cover_pylib=False,
                auto_data=False, timid=False, branch=False):
        """        
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended to `data_file` to create the
        final file name.  If `data_suffix` is simply True, then a suffix is
        created with the machine and process identity included.
        
        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.
        
        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.
        
        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.
        
        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        """
        from coverage import __version__
        
        self.cover_pylib = cover_pylib
        self.auto_data = auto_data
        self.atexit_registered = False

        self.exclude_re = ""
        self.exclude_list = []
        
        self.file_locator = FileLocator()
        
        # Timidity: for nose users, read an environment variable.  This is a
        # cheap hack, since the rest of the command line arguments aren't
        # recognized, but it solves some users' problems.
        timid = timid or ('--timid' in os.environ.get('COVERAGE_OPTIONS', ''))
        self.collector = Collector(
            self._should_trace, timid=timid, branch=branch
            )

        # Create the data file.
        if data_suffix:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid
                data_suffix = ".%s.%s" % (socket.gethostname(), os.getpid())
        else:
            data_suffix = None

        self.data = CoverageData(
            basename=data_file, suffix=data_suffix,
            collector="coverage v%s" % __version__
            )

        # The default exclude pattern.
        self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')

        # The prefix for files considered "installed with the interpreter".
        if not self.cover_pylib:
            # Look at where the "os" module is located.  That's the indication
            # for "installed with the interpreter".
            os_file = self.file_locator.canonical_filename(os.__file__)
            self.pylib_prefix = os.path.split(os_file)[0]

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        here = self.file_locator.canonical_filename(__file__)
        self.cover_prefix = os.path.split(here)[0]

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`
        
        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.
        
        Returns a canonicalized filename if it should be traced, False if it
        should not.
        
        """
        if filename == '<string>':
            # There's no point in ever tracing string executions, we can't do
            # anything with the data later anyway.
            return False

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name
        # is __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            if not dunder_file.endswith(".py"):
                if dunder_file[-4:-1] == ".py":
                    dunder_file = dunder_file[:-1]
            filename = dunder_file

        canonical = self.file_locator.canonical_filename(filename)

        # If we aren't supposed to trace installed code, then check if this is
        # near the Python standard library and skip it if so.
        if not self.cover_pylib:
            if canonical.startswith(self.pylib_prefix):
                return False

        # We exclude the coverage code itself, since a little of it will be
        # measured otherwise.
        if canonical.startswith(self.cover_prefix):
            return False

        return canonical

    # To log what should_trace returns, change this to "if 1:"
    if 0:
        _real_should_trace = _should_trace
        def _should_trace(self, filename, frame):   # pylint: disable-msg=E0102
            """A logging decorator around the real _should_trace function."""
            ret = self._real_should_trace(filename, frame)
            print("should_trace: %r -> %r" % (filename, ret))
            return ret

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).
        
        `usecache` is true or false, whether to read and write data on disk.
        
        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()
        
    def start(self):
        """Start measuring code coverage."""
        if self.auto_data:
            self.load()
            # Save coverage data when Python exits.
            if not self.atexit_registered:
                atexit.register(self.save)
                self.atexit_registered = True
        self.collector.start()
        
    def stop(self):
        """Stop measuring code coverage."""
        self.collector.stop()
        self._harvest_data()

    def erase(self):
        """Erase previously-collected coverage data.
        
        This removes the in-memory data collected in this session as well as
        discarding the data file.
        
        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self):
        """Clear the exclude list."""
        self.exclude_list = []
        self.exclude_re = ""

    def exclude(self, regex):
        """Exclude source lines from execution consideration.
        
        `regex` is a regular expression.  Lines matching this expression are
        not considered executable when reporting code coverage.  A list of
        regexes is maintained; this function adds a new regex to the list.
        Matching any of the regexes excludes a source line.
        
        """
        self.exclude_list.append(regex)
        self.exclude_re = "(" + ")|(".join(self.exclude_list) + ")"

    def get_exclude_list(self):
        """Return the list of excluded regex patterns."""
        return self.exclude_list

    def save(self):
        """Save the collected coverage data to the data file."""
        self._harvest_data()
        self.data.write()

    def combine(self):
        """Combine together a number of similarly-named coverage data files.
        
        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.
        
        """
        self.data.combine_parallel_data()

    def _harvest_data(self):
        """Get the collected data and reset the collector."""
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.
        
        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:
        
        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        analysis = self._analyze(morf)
        return (
            analysis.filename, analysis.statements, analysis.excluded,
            analysis.missing, analysis.missing_formatted()
            )

    def _analyze(self, it):
        """Analyze a single morf or code unit.
        
        Returns an `Analysis` object.

        """
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]
        
        return Analysis(self, it)

    def report(self, morfs=None, show_missing=True, ignore_errors=False,
                file=None, omit_prefixes=None):     # pylint: disable-msg=W0622
        """Write a summary report to `file`.
        
        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.
        
        """
        reporter = SummaryReporter(self, show_missing, ignore_errors)
        reporter.report(morfs, outfile=file, omit_prefixes=omit_prefixes)

    def annotate(self, morfs=None, directory=None, ignore_errors=False,
                    omit_prefixes=None):
        """Annotate a list of modules.
        
        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".
        
        """
        reporter = AnnotateReporter(self, ignore_errors)
        reporter.report(
            morfs, directory=directory, omit_prefixes=omit_prefixes)

    def html_report(self, morfs=None, directory=None, ignore_errors=False,
                    omit_prefixes=None):
        """Generate an HTML report.
        
        """
        reporter = HtmlReporter(self, ignore_errors)
        reporter.report(
            morfs, directory=directory, omit_prefixes=omit_prefixes)

    def xml_report(self, morfs=None, outfile=None, ignore_errors=False,
                    omit_prefixes=None):
        """Generate an XML report of coverage results.
        
        The report is compatible with Cobertura reports.
        
        """
        if outfile:
            outfile = open(outfile, "w")
        try:
            reporter = XmlReporter(self, ignore_errors)
            reporter.report(
                morfs, omit_prefixes=omit_prefixes, outfile=outfile)
        finally:
            outfile.close()

    def sysinfo(self):
        """Return a list of key,value pairs showing internal information."""
        
        import coverage as covmod
        import platform, re, sys

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_prefix', self.cover_prefix),
            ('pylib_prefix', self.pylib_prefix),
            ('tracer', self.collector.tracer_name()),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', [
                ("%s = %s" % (k, v)) for k, v in os.environ.items()
                    if re.search("^COV|^PY", k)
                ]),
            ]
        return info
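
The docstrings above also describe the query side of this older API: exclude() appends to the list of exclusion regexes, load() reads a previously saved data file, and analysis2() returns the 5-tuple its docstring spells out. A short hedged sketch; the regex and module path passed in are illustrative:

from coverage import coverage   # same pre-4.x assumption

cov = coverage()
cov.exclude(r"#\s*pragma: no cover")   # add one more exclusion regex
cov.load()                             # read previously collected data

filename, statements, excluded, missing, missing_fmt = cov.analysis2("mymodule.py")
print("missing lines:", missing_fmt)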
Example #24
class Coverage(object):
    """Programmatic access to coverage.py.

    To use::

        from coverage import coverage

        cov = Coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None, debug=None,
                debug_file=None, concurrency=None, plugins=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired. `debug_file` is the file to write debug messages to,
        defaulting to stderr.

        `concurrency` is a string indicating the concurrency library being used
        in the measured code.  Without this, coverage.py will get incorrect
        results.  Valid strings are "greenlet", "eventlet", "gevent", or
        "thread" (the default).

        `plugins` TODO.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = should_read_setupcfg = False
            if config_file is True:
                config_file = ".coveragerc"
                should_read_setupcfg = True
            try:
                did_read_rc = self.config.from_file(config_file)
            except ValueError as err:
                raise CoverageException(
                    "Couldn't read config file %s: %s" % (config_file, err)
                    )

            if not did_read_rc and should_read_setupcfg:
                self.config.from_file("setup.cfg", section_prefix="coverage:")

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include, debug=debug,
            concurrency=concurrency, plugins=plugins,
            )

        # Create and configure the debugging controller.
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.trace_judges = []
        for plugin in self.plugins:
            if plugin_implements(plugin, "trace_judge"):
                self.trace_judges.append(plugin)
        self.trace_judges.append(None)      # The Python case.

        self.auto_data = auto_data

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._tracing_check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=self.config.concurrency,
            )

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True

        # State machine variables:
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False

        atexit.register(self._atexit)

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        morf_filename = PythonCodeUnit(morf, self.file_locator).filename
        return os.path.split(morf_filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`."""
        if not filename.endswith(".py"):
            if filename[-4:-1] == ".py":
                filename = filename[:-1]
            elif filename.endswith("$py.class"): # jython
                filename = filename[:-9] + ".py"
        return filename

    def _should_trace_with_reason(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        disp = FileDisposition(filename)
        def nope(disp, reason):
            disp.trace = False
            disp.reason = reason
            return disp

        self._check_for_packages()

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name is
        # __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        if not filename:
            # Empty string is pretty useless
            return nope(disp, "empty string isn't a filename")

        if filename.startswith('memory:'):
            return nope(disp, "memory isn't traceable")

        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "not a real filename")

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        for plugin in self.trace_judges:
            if plugin:
                plugin.trace_judge(disp)
            else:
                disp.trace = True
                disp.source_filename = canonical
            if disp.trace:
                disp.plugin = plugin

                if disp.check_filters:
                    reason = self._check_include_omit_etc(disp.source_filename)
                    if reason:
                        nope(disp, reason)

                return disp

        return nope(disp, "no plugin found")  # TODO: a test that causes this.

    def _check_include_omit_etc(self, filename):
        """Check a filename against the include, omit, etc, rules.

        Returns a string or None.  A string means "don't trace", and gives the
        reason why.  None means no reason was found not to trace.

        """
        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match:
            if not self.source_match.match(filename):
                return "falls outside the --source trees"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(filename):
                return "is part of coverage.py"

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"

        # No reason found to skip this file.
        return None

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`.

        Calls `_should_trace_with_reason`, and returns the FileDisposition.

        """
        disp = self._should_trace_with_reason(filename, frame)
        if self.debug.should('trace'):
            self.debug.write(disp.debug_message())
        return disp

    def _tracing_check_include_omit_etc(self, filename):
        """Check a filename against the include, omit, etc, rules, and say so.

        Returns a boolean: True if the file should be traced, False if not.

        """
        reason = self._check_include_omit_etc(filename)
        if self.debug.should('trace'):
            if not reason:
                msg = "Tracing %r" % (filename,)
            else:
                msg = "Not tracing %r: %s" % (filename, reason)
            self.debug.write(msg)

        return not reason

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        sys.stderr.write("Coverage.py warning: %s\n" % msg)

    def _check_for_packages(self):
        """Update the source_match matcher with latest imported packages."""
        # Our self.source_pkgs attribute is a list of package names we want to
        # measure.  Each time through here, we see if we've imported any of
        # them yet.  If so, we add its file to source_match, and we don't have
        # to look for that package any more.
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)

                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    pkg_file = None
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__'):
                        # This is actually a package, return the directory.
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    if not os.path.exists(pkg_file):
                        pkg_file = None

                if pkg_file:
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)
                else:
                    self._warn("Module %s has no Python source." % pkg)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage.

        Coverage measurement actually occurs in functions called after `start`
        is invoked.  Statements in the same scope as `start` won't be measured.

        Once you invoke `start`, you must also call `stop` eventually, or your
        process might not shut down cleanly.

        """
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        if self.debug.should('config'):
            self.debug.write("Configuration values:")
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info(config_info)

        if self.debug.should('sys'):
            self.debug.write("Debugging info:")
            self.debug.write_formatted_info(self.sysinfo())

        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        """Stop measuring code coverage."""
        self._started = False
        self.collector.stop()

    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self.auto_data:
            self.save()

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        setattr(self.config, which + "_list", [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.

        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.

        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.

        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.

        """
        excl_list = getattr(self.config, which + "_list")
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + "_list")
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.

        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.

        """
        return getattr(self.config, which + "_list")

    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE:
                f = open(_TEST_NAME_FILE)
                test_name = f.read()
                f.close()
                extra = "." + test_name
            data_suffix = "%s%s.%s.%06d" % (
                socket.gethostname(), extra, os.getpid(),
                random.randint(0, 999999)
                )

        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)
        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        """
        if not self._measured:
            return

        # TODO: seems like this parallel structure is getting kinda old...
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.data.add_plugin_data(self.collector.get_plugin_data())
        self.collector.reset()

        # If there are still entries in the source_pkgs list, then we never
        # encountered those packages.
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                self._warn("Module %s was never imported." % pkg)

        # Find out if we got any data.
        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn("No data was collected.")

        # Find files that were never executed at all.
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)

                if self.omit_match and self.omit_match.match(py_file):
                    # Turns out this file was omitted, so don't pull it back
                    # in as unexecuted.
                    continue

                self.data.touch_file(py_file)

        self._measured = False

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        analysis = self._analyze(morf)
        return (
            analysis.filename,
            sorted(analysis.statements),
            sorted(analysis.excluded),
            sorted(analysis.missing),
            analysis.missing_formatted(),
            )

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        def get_plugin(filename):
            """For code_unit_factory to use to find the plugin for a file."""
            plugin = None
            plugin_name = self.data.plugin_data().get(filename)
            if plugin_name:
                plugin = self.plugins.get(plugin_name)
            return plugin

        self._harvest_data()
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator, get_plugin)[0]

        return Analysis(self, it)

    def report(self, morfs=None, show_missing=True, ignore_errors=None,
                file=None,                          # pylint: disable=W0622
                omit=None, include=None
                ):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            show_missing=show_missing,
            )
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None, extra_css=None, title=None):
        """Generate an HTML report.

        The HTML is written to `directory`.  The file "index.html" is the
        overview starting point, with links to more detailed pages for
        individual modules.

        `extra_css` is a path to a file of other CSS to apply on the page.
        It will be copied into the HTML directory.

        `title` is a text string (not HTML) to use as the title of the HTML
        report.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            html_dir=directory, extra_css=extra_css, html_title=title,
            )
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
                    omit=None, include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
            )
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                # Ensure that the output directory is created; done here
                # because this report pre-opens the output file.
                # HTMLReport does this using the Report plumbing because
                # its task is more complex, being multiple files.
                output_dir = os.path.dirname(self.config.xml_output)
                if output_dir and not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod

        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = "unknown"

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', sorted(
                ("%s = %s" % (k, v)) for k, v in iitems(os.environ)
                    if k.startswith(("COV", "PY"))
                )),
            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
            ]
        if self.source_match:
            info.append(('source_match', self.source_match.info()))
        if self.include_match:
            info.append(('include_match', self.include_match.info()))
        if self.omit_match:
            info.append(('omit_match', self.omit_match.info()))
        if self.cover_match:
            info.append(('cover_match', self.cover_match.info()))
        if self.pylib_match:
            info.append(('pylib_match', self.pylib_match.info()))

        return info
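
A short sketch of dumping the diagnostic pairs returned by `sysinfo` above (it assumes this `Coverage` class is importable from the `coverage` package):

from coverage import Coverage

cov = Coverage()
for key, value in cov.sysinfo():
    print("%s: %s" % (key, value))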
Ejemplo n.º 31
0
class coverage(object):
    """Programmatic access to coverage.py.

    To use::

        from coverage import coverage

        cov = coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include)

        self.auto_data = auto_data

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(basename=self.config.data_file,
                                 collector="coverage v%s" % __version__)

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, random, socket):
                if hasattr(m, "__file__"):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Only _harvest_data once per measurement cycle.
        self._harvested = False

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._started = False

        atexit.register(self._atexit)

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`."""
        if not filename.endswith(".py"):
            if filename[-4:-1] == ".py":
                filename = filename[:-1]
            elif filename.endswith("$py.class"):  # jython
                filename = filename[:-9] + ".py"
        return filename

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a canonicalized filename if it should be traced, False if it
        should not.

        """
        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return False

        self._check_for_packages()

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name is
        # __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)

        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match:
            if not self.source_match.match(canonical):
                return False
        elif self.include_match:
            if not self.include_match.match(canonical):
                return False
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(canonical):
                return False

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(canonical):
                return False

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(canonical):
            return False

        return canonical

    # To log what should_trace returns, change this to "if 1:"
    if 0:
        _real_should_trace = _should_trace

        def _should_trace(self, filename, frame):  # pylint: disable=E0102
            """A logging decorator around the real _should_trace function."""
            ret = self._real_should_trace(filename, frame)
            print("should_trace: %r -> %r" % (filename, ret))
            return ret

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        sys.stderr.write("Coverage.py warning: %s\n" % msg)

    def _check_for_packages(self):
        """Update the source_match matcher with latest imported packages."""
        # Our self.source_pkgs attribute is a list of package names we want to
        # measure.  Each time through here, we see if we've imported any of
        # them yet.  If so, we add its file to source_match, and we don't have
        # to look for that package any more.
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)

                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    pkg_file = None
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__'):
                        # This is actually a package, return the directory.
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    if not os.path.exists(pkg_file):
                        pkg_file = None

                if pkg_file:
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)
                else:
                    self._warn("Module %s has no Python source." % pkg)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage.

        Coverage measurement actually occurs in functions called after `start`
        is invoked.  Statements in the same scope as `start` won't be measured.

        Once you invoke `start`, you must also call `stop` eventually, or your
        process might not shut down cleanly.

        """
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        self._harvested = False
        self.collector.start()
        self._started = True

    def stop(self):
        """Stop measuring code coverage."""
        self._started = False
        self.collector.stop()

    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self.auto_data:
            self.save()

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        setattr(self.config, which + "_list", [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.

        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.

        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.

        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.

        """
        excl_list = getattr(self.config, which + "_list")
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + "_list")
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.

        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.

        """
        return getattr(self.config, which + "_list")

    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            data_suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(),
                                          random.randint(0, 99999))

        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)
        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        """
        if not self._harvested:
            self.data.add_line_data(self.collector.get_line_data())
            self.data.add_arc_data(self.collector.get_arc_data())
            self.collector.reset()

            # If there are still entries in the source_pkgs list, then we never
            # encountered those packages.
            for pkg in self.source_pkgs:
                self._warn("Module %s was never imported." % pkg)

            # Find out if we got any data.
            summary = self.data.summary()
            if not summary and self._warn_no_data:
                self._warn("No data was collected.")

            # Find files that were never executed at all.
            for src in self.source:
                for py_file in find_python_files(src):
                    py_file = self.file_locator.canonical_filename(py_file)
                    self.data.touch_file(py_file)

            self._harvested = True

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        analysis = self._analyze(morf)
        return (analysis.filename, analysis.statements, analysis.excluded,
                analysis.missing, analysis.missing_formatted())

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        self._harvest_data()
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]

        return Analysis(self, it)

    def report(
            self,
            morfs=None,
            show_missing=True,
            ignore_errors=None,
            file=None,  # pylint: disable=W0622
            omit=None,
            include=None):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            show_missing=show_missing,
        )
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(self,
                 morfs=None,
                 directory=None,
                 ignore_errors=None,
                 omit=None,
                 include=None):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self,
                    morfs=None,
                    directory=None,
                    ignore_errors=None,
                    omit=None,
                    include=None,
                    extra_css=None,
                    title=None):
        """Generate an HTML report.

        The HTML is written to `directory`.  The file "index.html" is the
        overview starting point, with links to more detailed pages for
        individual modules.

        `extra_css` is a path to a file of other CSS to apply on the page.
        It will be copied into the HTML directory.

        `title` is a text string (not HTML) to use as the title of the HTML
        report.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            html_dir=directory,
            extra_css=extra_css,
            html_title=title,
        )
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(self,
                   morfs=None,
                   outfile=None,
                   ignore_errors=None,
                   omit=None,
                   include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            xml_output=outfile,
        )
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod
        import platform, re

        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = "unknown"

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', [("%s = %s" % (k, v))
                             for k, v in iitems(os.environ)
                             if re.search(r"^COV|^PY", k)]),
        ]
        return info
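
An end-to-end sketch for the older `coverage` class above, sending the Cobertura XML to stdout; the measured package is hypothetical:

from coverage import coverage

cov = coverage(config_file=False, source=["mypkg"], branch=True)
cov.start()
import mypkg.main             # hypothetical module under measurement
mypkg.main.run()
cov.stop()
cov.save()
cov.xml_report(outfile="-")   # "-" writes the XML report to stdout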
Ejemplo n.º 32
0
File: control.py Project: th0/test2
class Coverage(object):
    """Programmatic access to coverage.py.

    To use::

        from coverage import Coverage

        cov = Coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(
        self,
        data_file=None,
        data_suffix=None,
        cover_pylib=None,
        auto_data=False,
        timid=None,
        branch=None,
        config_file=True,
        source=None,
        omit=None,
        include=None,
        debug=None,
        concurrency=None,
    ):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what configuration file to read:

            * If it is ".coveragerc", it is interpreted as if it were True,
              for backward compatibility.

            * If it is a string, it is the name of the file to read.  If the
              file can't be read, it is an error.

            * If it is True, then a few standard file names are tried
              (".coveragerc", "setup.cfg").  It is not an error for these files
              to not be found.

            * If it is False, then no configuration file is read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired.

        `concurrency` is a string indicating the concurrency library being used
        in the measured code.  Without this, coverage.py will get incorrect
        results.  Valid strings are "greenlet", "eventlet", "gevent", or
        "thread" (the default).

        """
        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the rcfile, .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = False
            # Some API users were specifying ".coveragerc" to mean the same as
            # True, so make it so.
            if config_file == ".coveragerc":
                config_file = True
            specified_file = (config_file is not True)
            if not specified_file:
                config_file = ".coveragerc"

            did_read_rc = self.config.from_file(config_file)

            if not did_read_rc:
                if specified_file:
                    raise CoverageException(
                        "Couldn't read '%s' as a config file" % config_file)
                self.config.from_file("setup.cfg", section_prefix="coverage:")

        # 3: from environment variables:
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file,
            cover_pylib=cover_pylib,
            timid=timid,
            branch=branch,
            parallel=bool_or_none(data_suffix),
            source=source,
            omit=omit,
            include=include,
            debug=debug,
            concurrency=concurrency,
        )

        self._debug_file = None
        self._auto_data = auto_data
        self._data_suffix = data_suffix

        # The matchers for _should_trace.
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Other instance attributes, set later.
        self.omit = self.include = self.source = None
        self.source_pkgs = self.file_locator = None
        self.data = self.collector = None
        self.plugins = self.file_tracers = None
        self.pylib_dirs = self.cover_dir = None
        self.data_suffix = self.run_suffix = None
        self._exclude_re = None
        self.debug = None

        # State machine variables:
        # Have we initialized everything?
        self._inited = False
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False

    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state. This lets us
        construct a Coverage object, then tweak its state before this function
        is called.

        """
        from coverage import __version__

        if self._inited:
            return

        # Create and configure the debugging controller.
        if self._debug_file is None:
            self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.file_tracers = []
        for plugin in self.plugins:
            if overrides(plugin, "file_tracer", CoveragePlugin):
                self.file_tracers.append(plugin)

        # _exclude_re is a dict that maps exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        concurrency = self.config.concurrency
        if concurrency == "multiprocessing":
            patch_multiprocessing()
            concurrency = None

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=concurrency,
        )

        # Early warning if we aren't going to be able to support plugins.
        if self.file_tracers and not self.collector.supports_plugins:
            raise CoverageException(
                "Plugin file tracers (%s) aren't supported with %s" % (
                    ", ".join(ft._coverage_plugin_name
                              for ft in self.file_tracers),
                    self.collector.tracer_name(),
                ))

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
        )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the filenames.  So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        atexit.register(self._atexit)

        self._inited = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True

        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin._coverage_plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True

        if wrote_any:
            self.debug.write_formatted_info("end", ())

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        morf_filename = PythonCodeUnit(morf, self).filename
        return os.path.split(morf_filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`.

        Given a filename being traced, return the best guess as to the source
        file to attribute it to.

        """
        if filename.endswith(".py"):
            # .py files are themselves source files.
            return filename

        elif filename.endswith((".pyc", ".pyo")):
            # Bytecode files probably have source files near them.
            py_filename = filename[:-1]
            if os.path.exists(py_filename):
                # Found a .py file, use that.
                return py_filename
            if env.WINDOWS:
                # On Windows, it could be a .pyw file.
                pyw_filename = py_filename + "w"
                if os.path.exists(pyw_filename):
                    return pyw_filename
            # Didn't find source, but it's probably the .py file we want.
            return py_filename

        elif filename.endswith("$py.class"):
            # Jython is easy to guess.
            return filename[:-9] + ".py"

        # No idea, just use the filename as-is.
        return filename

    def _name_for_module(self, module_globals, filename):
        """Get the name of the module for a set of globals and filename.

        For configurability's sake, we allow __main__ modules to be matched by
        their importable name.

        If loaded via runpy (aka -m), we can usually recover the "original"
        full dotted module name, otherwise, we resort to interpreting the
        filename to get the module's name.  In the case that the module name
        can't be determined, None is returned.

        """
        dunder_name = module_globals.get('__name__', None)

        if isinstance(dunder_name, str) and dunder_name != '__main__':
            # This is the usual case: an imported module.
            return dunder_name

        loader = module_globals.get('__loader__', None)
        for attrname in ('fullname', 'name'):  # attribute renamed in py3.2
            if hasattr(loader, attrname):
                fullname = getattr(loader, attrname)
            else:
                continue

            if isinstance(fullname, str) and fullname != '__main__':
                # Module loaded via: runpy -m
                return fullname

        # Script as first argument to Python command line.
        inspectedname = inspect.getmodulename(filename)
        if inspectedname is not None:
            return inspectedname
        else:
            return dunder_name

    def _should_trace_internal(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        disp = FileDisposition(filename)

        def nope(disp, reason):
            """Simple helper to make it easy to return NO."""
            disp.trace = False
            disp.reason = reason
            return disp

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name is
        # __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a filename")

        if filename.startswith('memory:'):
            return nope(disp, "memory isn't traceable")

        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "not a real filename")

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        plugin = None
        for plugin in self.file_tracers:
            if not plugin._coverage_enabled:
                continue

            try:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    file_tracer._coverage_plugin_name = \
                        plugin._coverage_plugin_name
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = \
                            self.file_locator.canonical_filename(
                                file_tracer.source_filename()
                            )
                    break
            except Exception:
                self._warn("Disabling plugin %r due to an exception:" %
                           (plugin._coverage_plugin_name))
                traceback.print_exc()
                plugin._coverage_enabled = False
                continue
        else:
            # No plugin wanted it: it's Python.
            disp.trace = True
            disp.source_filename = canonical

        if not disp.has_dynamic_filename:
            if not disp.source_filename:
                raise CoverageException(
                    "Plugin %r didn't set source_filename for %r" %
                    (plugin, disp.original_filename))
            reason = self._check_include_omit_etc_internal(
                disp.source_filename,
                frame,
            )
            if reason:
                nope(disp, reason)

        return disp

    def _check_include_omit_etc_internal(self, filename, frame):
        """Check a filename against the include, omit, etc, rules.

        Returns a string or None.  String means, don't trace, and is the reason
        why.  None means no reason found to not trace.

        """
        modulename = self._name_for_module(frame.f_globals, filename)

        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match:
            if self.source_pkgs_match.match(modulename):
                if modulename in self.source_pkgs:
                    self.source_pkgs.remove(modulename)
                return None  # There's no reason to skip this file.

            if not self.source_match.match(filename):
                return "falls outside the --source trees"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(filename):
                return "is part of coverage.py"

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"

        # No reason found to skip this file.
        return None

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`.

        Calls `_should_trace_internal`, and returns the FileDisposition.

        """
        disp = self._should_trace_internal(filename, frame)
        if self.debug.should('trace'):
            self.debug.write(disp.debug_message())
        return disp

    def _check_include_omit_etc(self, filename, frame):
        """Check a filename against the include/omit/etc, rules, verbosely.

        Returns a boolean: True if the file should be traced, False if not.

        """
        reason = self._check_include_omit_etc_internal(filename, frame)
        if self.debug.should('trace'):
            if not reason:
                msg = "Including %r" % (filename, )
            else:
                msg = "Not including %r: %s" % (filename, reason)
            self.debug.write(msg)

        return not reason

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        if self.debug.should("pid"):
            msg = "[%d] %s" % (os.getpid(), msg)
        sys.stderr.write("Coverage.py warning: %s\n" % msg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self._init()
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self._init()
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage.

        Coverage measurement actually occurs in functions called after `start`
        is invoked.  Statements in the same scope as `start` won't be measured.

        Once you invoke `start`, you must also call `stop` eventually, or your
        process might not shut down cleanly.

        """
        self._init()
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self._auto_data:
            self.load()

        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        """Stop measuring code coverage."""
        if self._started:
            self.collector.stop()
        self._started = False

    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self._auto_data:
            self.save()

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self._init()
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        self._init()
        setattr(self.config, which + "_list", [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.

        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.

        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.

        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.

        """
        self._init()
        excl_list = getattr(self.config, which + "_list")
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + "_list")
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.

        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.

        """
        self._init()
        return getattr(self.config, which + "_list")

    def save(self):
        """Save the collected coverage data to the data file."""
        self._init()
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE:  # pragma: debugging
                with open(_TEST_NAME_FILE) as f:
                    test_name = f.read()
                extra = "." + test_name
            data_suffix = "%s%s.%s.%06d" % (socket.gethostname(), extra,
                                            os.getpid(),
                                            random.randint(0, 999999))

        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        self._init()
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)
        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        """
        self._init()
        if not self._measured:
            return

        # TODO: seems like this parallel structure is getting kinda old...
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.data.add_plugin_data(self.collector.get_plugin_data())
        self.collector.reset()

        # If there are still entries in the source_pkgs list, then we never
        # encountered those packages.
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                if pkg not in sys.modules:
                    self._warn("Module %s was never imported." % pkg)
                elif not (hasattr(sys.modules[pkg], '__file__')
                          and os.path.exists(sys.modules[pkg].__file__)):
                    self._warn("Module %s has no Python source." % pkg)
                else:
                    self._warn("Module %s was previously imported, "
                               "but not measured." % pkg)

        # Find out if we got any data.
        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn("No data was collected.")

        # Find files that were never executed at all.
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)

                if self.omit_match and self.omit_match.match(py_file):
                    # Turns out this file was omitted, so don't pull it back
                    # in as unexecuted.
                    continue

                self.data.touch_file(py_file)

        self._measured = False

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        self._init()
        analysis = self._analyze(morf)
        return (
            analysis.filename,
            sorted(analysis.statements),
            sorted(analysis.excluded),
            sorted(analysis.missing),
            analysis.missing_formatted(),
        )

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        self._harvest_data()
        if not isinstance(it, FileReporter):
            it = self._get_file_reporter(it)

        return Analysis(self, it)

    def _get_file_reporter(self, morf):
        """Get a FileReporter for a module or filename."""
        plugin = None

        if isinstance(morf, string_class):
            plugin_name = self.data.plugin_data().get(morf)
            if plugin_name:
                plugin = self.plugins.get(plugin_name)

        if plugin:
            file_reporter = plugin.file_reporter(morf)
            if file_reporter is None:
                raise CoverageException(
                    "Plugin %r did not provide a file reporter for %r." %
                    (plugin._coverage_plugin_name, morf))
        else:
            file_reporter = PythonCodeUnit(morf, self)

        return file_reporter

    def _get_file_reporters(self, morfs=None):
        """Get a list of FileReporters for a list of modules or filenames.

        For each module or filename in `morfs`, find a FileReporter.  Return
        the list of FileReporters.

        If `morfs` is a single module or filename, this returns a list of one
        FileReporter.  If `morfs` is empty or None, then the list of all files
        measured is used to find the FileReporters.

        """
        if not morfs:
            morfs = self.data.measured_files()

        # Be sure we have a list.
        if not isinstance(morfs, (list, tuple)):
            morfs = [morfs]

        file_reporters = []
        for morf in morfs:
            file_reporter = self._get_file_reporter(morf)
            file_reporters.append(file_reporter)

        return file_reporters

    def report(
        self,
        morfs=None,
        show_missing=True,
        ignore_errors=None,
        file=None,  # pylint: disable=redefined-builtin
        omit=None,
        include=None,
        skip_covered=False,
    ):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            show_missing=show_missing,
            skip_covered=skip_covered,
        )
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(
        self,
        morfs=None,
        directory=None,
        ignore_errors=None,
        omit=None,
        include=None,
    ):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self,
                    morfs=None,
                    directory=None,
                    ignore_errors=None,
                    omit=None,
                    include=None,
                    extra_css=None,
                    title=None):
        """Generate an HTML report.

        The HTML is written to `directory`.  The file "index.html" is the
        overview starting point, with links to more detailed pages for
        individual modules.

        `extra_css` is a path to a file of other CSS to apply on the page.
        It will be copied into the HTML directory.

        `title` is a text string (not HTML) to use as the title of the HTML
        report.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            html_dir=directory,
            extra_css=extra_css,
            html_title=title,
        )
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(
        self,
        morfs=None,
        outfile=None,
        ignore_errors=None,
        omit=None,
        include=None,
    ):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            xml_output=outfile,
        )
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                # Ensure that the output directory is created; done here
                # because this report pre-opens the output file.
                # HTMLReport does this using the Report plumbing because
                # its task is more complex, being multiple files.
                output_dir = os.path.dirname(self.config.xml_output)
                if output_dir and not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sys_info(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod

        self._init()
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = "unknown"

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('file_tracers',
             [ft._coverage_plugin_name for ft in self.file_tracers]),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment',
             sorted(("%s = %s" % (k, v)) for k, v in iitems(os.environ)
                    if k.startswith(("COV", "PY")))),
            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
        ]

        matcher_names = [
            'source_match',
            'source_pkgs_match',
            'include_match',
            'omit_match',
            'cover_match',
            'pylib_match',
        ]

        for matcher_name in matcher_names:
            matcher = getattr(self, matcher_name)
            if matcher:
                matcher_info = matcher.info()
            else:
                matcher_info = '-none-'
            info.append((matcher_name, matcher_info))

        return info
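A minimal sketch of the measurement lifecycle this Coverage class documents: construct (the constructor arguments are merged into the config via from_args), start, run the code to measure, stop, save, then report. Only the public API shown in the example above is assumed.

from coverage import Coverage

def work():
    return sum(i * i for i in range(100))

cov = Coverage(branch=True)    # branch coverage in addition to statement coverage
cov.start()
work()                         # only code executed between start() and stop() is measured
cov.stop()
cov.save()                     # writes the data file (".coverage" by default, plus any suffix)
total = cov.report()           # prints a text summary and returns the total percentage covered
print("covered: %.1f%%" % total)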
Ejemplo n.º 33
0
File: control.py Project: th0/test2
    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state. This lets us
        construct a Coverage object, then tweak its state before this function
        is called.

        """
        from coverage import __version__

        if self._inited:
            return

        # Create and configure the debugging controller.
        if self._debug_file is None:
            self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.file_tracers = []
        for plugin in self.plugins:
            if overrides(plugin, "file_tracer", CoveragePlugin):
                self.file_tracers.append(plugin)

        # _exclude_re is a dict that maps exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        concurrency = self.config.concurrency
        if concurrency == "multiprocessing":
            patch_multiprocessing()
            concurrency = None

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=concurrency,
        )

        # Early warning if we aren't going to be able to support plugins.
        if self.file_tracers and not self.collector.supports_plugins:
            raise CoverageException(
                "Plugin file tracers (%s) aren't supported with %s" % (
                    ", ".join(ft._coverage_plugin_name
                              for ft in self.file_tracers),
                    self.collector.tracer_name(),
                ))

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
        )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the filenames.  So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        atexit.register(self._atexit)

        self._inited = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True

        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin._coverage_plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True

        if wrote_any:
            self.debug.write_formatted_info("end", ())
Ejemplo n.º 34
0
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None, debug=None,
                debug_file=None):
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" % (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix), source=source,
            omit=omit, include=include, debug=debug
            )
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn)
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
        self.data = CoverageData(basename=self.config.data_file, collector='coverage v%s' % __version__, debug=self.debug)
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        self.cover_dir = self._canonical_dir(__file__)
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)
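A sketch of the configuration precedence visible in this constructor: the rc file is read first, the COVERAGE_FILE environment variable then overrides the data file name, and explicit constructor arguments are applied last (assuming, as the None defaults suggest, that from_args skips arguments left as None). The lowercase coverage class matches the older API of this example, as in Ejemplo n.º 37 below.

import os
from coverage import coverage

os.environ["COVERAGE_FILE"] = ".coverage.worker1"
cov_env = coverage()                                  # data file becomes ".coverage.worker1"
cov_arg = coverage(data_file=".coverage.explicit")    # an explicit argument overrides the env var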
Ejemplo n.º 35
0
class MatcherTest(CoverageTest):
    """Tests of file matchers."""
    def setUp(self):
        super(MatcherTest, self).setUp()
        self.fl = FileLocator()

    def assertMatches(self, matcher, filepath, matches):
        """The `matcher` should agree with `matches` about `filepath`."""
        canonical = self.fl.canonical_filename(filepath)
        self.assertEqual(
            matcher.match(canonical), matches,
            "File %s should have matched as %s" % (filepath, matches))

    def test_tree_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), True),
            (self.make_file("sub2/file3.h"), False),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fl = FileLocator()
        trees = [
            fl.canonical_filename("sub"),
            fl.canonical_filename("sub3/file4.py"),
        ]
        tm = TreeMatcher(trees)
        self.assertEqual(tm.info(), trees)
        for filepath, matches in matches_to_try:
            self.assertMatches(tm, filepath, matches)

    def test_module_matcher(self):
        matches_to_try = [
            ('test', True),
            ('trash', False),
            ('testing', False),
            ('test.x', True),
            ('test.x.y.z', True),
            ('py', False),
            ('py.t', False),
            ('py.test', True),
            ('py.testing', False),
            ('py.test.buz', True),
            ('py.test.buz.baz', True),
            ('__main__', False),
            ('mymain', True),
            ('yourmain', False),
        ]
        modules = ['test', 'py.test', 'mymain']
        mm = ModuleMatcher(modules)
        self.assertEqual(mm.info(), modules)
        for modulename, matches in matches_to_try:
            self.assertEqual(
                mm.match(modulename),
                matches,
                modulename,
            )

    def test_fnmatch_matcher(self):
        matches_to_try = [
            (self.make_file("sub/file1.py"), True),
            (self.make_file("sub/file2.c"), False),
            (self.make_file("sub2/file3.h"), True),
            (self.make_file("sub3/file4.py"), True),
            (self.make_file("sub3/file5.c"), False),
        ]
        fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
        self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
        for filepath, matches in matches_to_try:
            self.assertMatches(fnm, filepath, matches)

    def test_fnmatch_matcher_overload(self):
        fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)])
        self.assertMatches(fnm, "x007foo.txt", True)
        self.assertMatches(fnm, "x123foo.txt", True)
        self.assertMatches(fnm, "x798bar.txt", False)

    def test_fnmatch_windows_paths(self):
        # We should be able to match Windows paths even if we are running on
        # a non-Windows OS.
        fnm = FnmatchMatcher(["*/foo.py"])
        self.assertMatches(fnm, r"dir\foo.py", True)
        fnm = FnmatchMatcher([r"*\foo.py"])
        self.assertMatches(fnm, r"dir\foo.py", True)
Ejemplo n.º 36
0
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException(
                    "Couldn't read config file %s: %s" % (config_file, err)
                    )

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        if isinstance(omit, string_class):
            omit = [omit]
        if isinstance(include, string_class):
            include = [include]
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include
            )

        self.auto_data = auto_data
        self.atexit_registered = False

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = self._prep_patterns(self.config.omit)
        self.include = self._prep_patterns(self.config.include)

        self.collector = Collector(
            self._should_trace, timid=self.config.timid,
            branch=self.config.branch, warn=self._warn
            )

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, random, socket):
                if hasattr(m, "__file__"):
                    m_dir = self._canonical_dir(m.__file__)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Only _harvest_data once per measurement cycle.
        self._harvested = False

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # When tearing down the coverage object, modules can become None.
        # Saving the modules as object attributes avoids problems, but it is
        # quite ad-hoc which modules need to be saved and which references
        # need to use the object attributes.
        self.socket = socket
        self.os = os
        self.random = random
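A sketch of how a bare data_suffix=True is expanded when the data is eventually saved, following the hostname/pid/random recipe used in the save() method of Ejemplo n.º 32; the ".coverage" base name is the documented default.

import os
import random
import socket

# Distinguishing suffix for parallel runs: machine name, process id, random part.
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), random.randint(0, 999999))
print(".coverage." + suffix)    # e.g. ".coverage.myhost.12345.042517"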
Ejemplo n.º 37
0
class coverage(object):
    """Programmatic access to Coverage.

    To use::

        from coverage import coverage

        cov = coverage()
        cov.start()
        #.. blah blah (run your code) blah blah ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException(
                    "Couldn't read config file %s: %s" % (config_file, err)
                    )

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        if isinstance(omit, string_class):
            omit = [omit]
        if isinstance(include, string_class):
            include = [include]
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include
            )

        self.auto_data = auto_data
        self.atexit_registered = False

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = self._prep_patterns(self.config.omit)
        self.include = self._prep_patterns(self.config.include)

        self.collector = Collector(
            self._should_trace, timid=self.config.timid,
            branch=self.config.branch, warn=self._warn
            )

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, random, socket):
                if hasattr(m, "__file__"):
                    m_dir = self._canonical_dir(m.__file__)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Only _harvest_data once per measurement cycle.
        self._harvested = False

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # When tearing down the coverage object, modules can become None.
        # Saving the modules as object attributes avoids problems, but it is
        # quite ad-hoc which modules need to be saved and which references
        # need to use the object attributes.
        self.socket = socket
        self.os = os
        self.random = random

    def _canonical_dir(self, f):
        """Return the canonical directory of the file `f`."""
        return os.path.split(self.file_locator.canonical_filename(f))[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`."""
        if not filename.endswith(".py"):
            if filename[-4:-1] == ".py":
                # A filename like "mod.pyc" or "mod.pyo": drop the trailing
                # character to get the name of the .py source file.
                filename = filename[:-1]
        return filename

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a canonicalized filename if it should be traced, False if it
        should not.

        """
        if os is None:
            return False

        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return False

        if filename.endswith(".html"):
            # Jinja and maybe other templating systems compile templates into
            # Python code, but use the template filename as the filename in
            # the compiled code.  Of course, those filenames are useless later
            # so don't bother collecting.  TODO: How should we really separate
            # out good file extensions from bad?
            return False

        self._check_for_packages()

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name is
        # __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)

        # If the user specified source, then that's authoritative about what to
        # measure.  If they didn't, then we have to exclude the stdlib and
        # coverage.py directories.
        if self.source_match:
            if not self.source_match.match(canonical):
                return False
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(canonical):
                return False

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(canonical):
                return False

        # Check the file against the include and omit patterns.
        if self.include_match and not self.include_match.match(canonical):
            return False
        if self.omit_match and self.omit_match.match(canonical):
            return False

        return canonical

    # To log what should_trace returns, change this to "if 1:"
    if 0:
        _real_should_trace = _should_trace
        def _should_trace(self, filename, frame):   # pylint: disable=E0102
            """A logging decorator around the real _should_trace function."""
            ret = self._real_should_trace(filename, frame)
            print("should_trace: %r -> %r" % (filename, ret))
            return ret

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        sys.stderr.write("Coverage.py warning: %s\n" % msg)

    def _prep_patterns(self, patterns):
        """Prepare the file patterns for use in a `FnmatchMatcher`.

        If a pattern starts with a wildcard, it is used as a pattern
        as-is.  If it does not start with a wildcard, then it is made
        absolute with the current directory.

        If `patterns` is None, an empty list is returned.

        """
        patterns = patterns or []
        prepped = []
        for p in patterns:
            if p.startswith("*") or p.startswith("?"):
                prepped.append(p)
            else:
                prepped.append(self.file_locator.abs_file(p))
        return prepped
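
    # Illustrative sketch (added to this listing, not in the original source):
    # _prep_patterns() leaves wildcard patterns alone and anchors plain paths
    # to the directory the process started in.  Assuming that directory is
    # /proj, a call would behave roughly like:
    #
    #     self._prep_patterns(["*/tests/*", "vendor/lib.py"])
    #     # -> ["*/tests/*", "/proj/vendor/lib.py"]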

    def _check_for_packages(self):
        """Update the source_match matcher with latest imported packages."""
        # Our self.source_pkgs attribute is a list of package names we want to
        # measure.  Each time through here, we see if we've imported any of
        # them yet.  If so, we add its file to source_match, and we don't have
        # to look for that package any more.
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)

                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    self._warn("Module %s has no Python source." % pkg)
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__.'):
                        # This is actually a package, return the directory.
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage."""
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()
            # Save coverage data when Python exits.
            if not self.atexit_registered:
                atexit.register(self.save)
                self.atexit_registered = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        self._harvested = False
        self.collector.start()

    def stop(self):
        """Stop measuring code coverage."""
        self.collector.stop()
        self._harvest_data()

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        setattr(self.config, which + "_list", [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.

        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.

        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.

        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.

        """
        excl_list = getattr(self.config, which + "_list")
        excl_list.append(regex)
        self._exclude_regex_stale()
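
    # Hedged usage sketch (added for illustration, not part of coverage.py):
    # exclude() appends a regex to one of the named lists on the config.  The
    # "exclude" list marks lines as never executable; the "partial" list marks
    # branch lines whose untaken branch is acceptable.
    #
    #     cov = coverage()
    #     cov.exclude(r"#\s*pragma: no cover")            # default "exclude" list
    #     cov.exclude(r"if self\.debug:")                 # also "exclude"
    #     cov.exclude(r"while True:", which="partial")    # "partial" list
    #     print(cov.get_exclude_list("partial"))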

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + "_list")
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.

        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.

        """
        return getattr(self.config, which + "_list")

    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            data_suffix = "%s.%s.%06d" % (
                self.socket.gethostname(), self.os.getpid(),
                self.random.randint(0, 99999)
                )

        self._harvest_data()
        self.data.write(suffix=data_suffix)
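
    # Illustrative note (added, not in the original source): when data_suffix
    # was given as True (or the config enables parallel), the suffix is built
    # here at write time, so the data file name ends up looking roughly like
    # ".coverage.myhost.12345.067890" -- hostname, pid, and a random number.
    #
    #     cov = coverage(data_suffix=True)
    #     cov.start(); run_my_code(); cov.stop()   # run_my_code() is a hypothetical stand-in
    #     cov.save()                               # writes .coverage.<host>.<pid>.<random>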

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        self.data.combine_parallel_data()
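
    # Hedged workflow sketch (added for illustration): combine() is the second
    # half of parallel measurement.  Each worker writes its own suffixed data
    # file, and a final process merges them into a single .coverage file:
    #
    #     # in each worker process (work() is a hypothetical stand-in):
    #     cov = coverage(data_suffix=True); cov.start(); work(); cov.stop(); cov.save()
    #
    #     # afterwards, in one process:
    #     cov = coverage()
    #     cov.combine()      # reads the .coverage.* files next to .coverage
    #     cov.save()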

    def _harvest_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        """
        if not self._harvested:
            self.data.add_line_data(self.collector.get_line_data())
            self.data.add_arc_data(self.collector.get_arc_data())
            self.collector.reset()

            # If there are still entries in the source_pkgs list, then we never
            # encountered those packages.
            for pkg in self.source_pkgs:
                self._warn("Module %s was never imported." % pkg)

            # Find out if we got any data.
            summary = self.data.summary()
            if not summary:
                self._warn("No data was collected.")

            # Find files that were never executed at all.
            for src in self.source:
                for py_file in find_python_files(src):
                    self.data.touch_file(py_file)

            self._harvested = True

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        analysis = self._analyze(morf)
        return (
            analysis.filename, analysis.statements, analysis.excluded,
            analysis.missing, analysis.missing_formatted()
            )
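
    # Usage sketch (illustrative, not part of the original file): analysis2()
    # accepts a module object or a filename; "mymod.py" below is hypothetical.
    #
    #     fname, statements, excluded, missing, missing_fmt = cov.analysis2("mymod.py")
    #     print("Missing lines in %s: %s" % (fname, missing_fmt))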

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]

        return Analysis(self, it)

    def report(self, morfs=None, show_missing=True, ignore_errors=None,
                file=None,                          # pylint: disable=W0622
                omit=None, include=None
                ):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = SummaryReporter(
            self, show_missing, self.config.ignore_errors
            )
        reporter.report(morfs, outfile=file, config=self.config)

    def annotate(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = AnnotateReporter(self, self.config.ignore_errors)
        reporter.report(morfs, config=self.config, directory=directory)

    def html_report(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None):
        """Generate an HTML report.

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            html_dir=directory,
            )
        reporter = HtmlReporter(self, self.config.ignore_errors)
        reporter.report(morfs, config=self.config)

    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
                    omit=None, include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
            )
        file_to_close = None
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config.ignore_errors)
            reporter.report(morfs, outfile=outfile, config=self.config)
        finally:
            if file_to_close:
                file_to_close.close()
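
    # Illustrative calls (added, not in the original source): the output
    # target can also come from the config file's xml_output setting.
    #
    #     cov.xml_report(outfile="coverage.xml")   # write Cobertura-style XML to a file
    #     cov.xml_report(outfile="-")              # stream the report to stdout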

    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod
        import platform, re

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', [
                ("%s = %s" % (k, v)) for k, v in os.environ.items()
                    if re.search("^COV|^PY", k)
                ]),
            ]
        return info
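
    # Illustrative dump (added to this listing, not part of coverage.py):
    # sysinfo() is useful when debugging measurement problems.
    #
    #     cov = coverage()
    #     for key, value in cov.sysinfo():
    #         print("%s: %r" % (key, value))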
Ejemplo n.º 38
0
    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state. This lets us
        construct a Coverage object, then tweak its state before this function
        is called.

        """
        from coverage import __version__

        if self._inited:
            return

        # Create and configure the debugging controller.
        if self._debug_file is None:
            self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.file_tracing_plugins = []
        for plugin in self.plugins:
            if overrides(plugin, "file_tracer", CoveragePlugin):
                self.file_tracing_plugins.append(plugin)

        # _exclude_re is a dict that maps exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        concurrency = self.config.concurrency
        if concurrency == "multiprocessing":
            patch_multiprocessing()
            concurrency = None

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=concurrency,
            )

        # Early warning if we aren't going to be able to support plugins.
        if self.file_tracing_plugins and not self.collector.supports_plugins:
            self._warn(
                "Plugin file tracers (%s) aren't supported with %s" % (
                    ", ".join(
                        plugin._coverage_plugin_name
                            for plugin in self.file_tracing_plugins
                        ),
                    self.collector.tracer_name(),
                    )
                )
            for plugin in self.file_tracing_plugins:
                plugin._coverage_enabled = False

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the filenames.  So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        atexit.register(self._atexit)

        self._inited = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True

        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin._coverage_plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True

        if wrote_any:
            self.debug.write_formatted_info("end", ())
Ejemplo n.º 39
0
class Coverage(object):
    """Programmatic access to coverage.py.

    To use::

        from coverage import Coverage

        cov = Coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(
        self, data_file=None, data_suffix=None, cover_pylib=None,
        auto_data=False, timid=None, branch=None, config_file=True,
        source=None, omit=None, include=None, debug=None,
        concurrency=None,
    ):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what configuration file to read:

            * If it is ".coveragerc", it is interpreted as if it were True,
              for backward compatibility.

            * If it is a string, it is the name of the file to read.  If the
              file can't be read, it is an error.

            * If it is True, then a few standard file names are tried
              (".coveragerc", "setup.cfg").  It is not an error for these files
              to not be found.

            * If it is False, then no configuration file is read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired.

        `concurrency` is a string indicating the concurrency library being used
        in the measured code.  Without this, coverage.py will get incorrect
        results.  Valid strings are "greenlet", "eventlet", "gevent", or
        "thread" (the default).

        """
        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the rcfile, .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = False
            # Some API users were specifying ".coveragerc" to mean the same as
            # True, so make it so.
            if config_file == ".coveragerc":
                config_file = True
            specified_file = (config_file is not True)
            if not specified_file:
                config_file = ".coveragerc"

            did_read_rc = self.config.from_file(config_file)

            if not did_read_rc:
                if specified_file:
                    raise CoverageException(
                        "Couldn't read '%s' as a config file" % config_file
                        )
                self.config.from_file("setup.cfg", section_prefix="coverage:")

        # 3: from environment variables:
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include, debug=debug,
            concurrency=concurrency,
            )

        self._debug_file = None
        self._auto_data = auto_data
        self._data_suffix = data_suffix

        # The matchers for _should_trace.
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Other instance attributes, set later.
        self.omit = self.include = self.source = None
        self.source_pkgs = self.file_locator = None
        self.data = self.collector = None
        self.plugins = self.file_tracing_plugins = None
        self.pylib_dirs = self.cover_dir = None
        self.data_suffix = self.run_suffix = None
        self._exclude_re = None
        self.debug = None

        # State machine variables:
        # Have we initialized everything?
        self._inited = False
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False

    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state. This lets us
        construct a Coverage object, then tweak its state before this function
        is called.

        """
        from coverage import __version__

        if self._inited:
            return

        # Create and configure the debugging controller.
        if self._debug_file is None:
            self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.file_tracing_plugins = []
        for plugin in self.plugins:
            if overrides(plugin, "file_tracer", CoveragePlugin):
                self.file_tracing_plugins.append(plugin)

        # _exclude_re is a dict that maps exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        concurrency = self.config.concurrency
        if concurrency == "multiprocessing":
            patch_multiprocessing()
            concurrency = None

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=concurrency,
            )

        # Early warning if we aren't going to be able to support plugins.
        if self.file_tracing_plugins and not self.collector.supports_plugins:
            self._warn(
                "Plugin file tracers (%s) aren't supported with %s" % (
                    ", ".join(
                        plugin._coverage_plugin_name
                            for plugin in self.file_tracing_plugins
                        ),
                    self.collector.tracer_name(),
                    )
                )
            for plugin in self.file_tracing_plugins:
                plugin._coverage_enabled = False

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the filenames.  So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        atexit.register(self._atexit)

        self._inited = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True

        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin._coverage_plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True

        if wrote_any:
            self.debug.write_formatted_info("end", ())

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        morf_filename = PythonFileReporter(morf, self).filename
        return os.path.split(morf_filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`.

        Given a filename being traced, return the best guess as to the source
        file to attribute it to.

        """
        if filename.endswith(".py"):
            # .py files are themselves source files.
            return filename

        elif filename.endswith((".pyc", ".pyo")):
            # Bytecode files probably have source files near them.
            py_filename = filename[:-1]
            if os.path.exists(py_filename):
                # Found a .py file, use that.
                return py_filename
            if env.WINDOWS:
                # On Windows, it could be a .pyw file.
                pyw_filename = py_filename + "w"
                if os.path.exists(pyw_filename):
                    return pyw_filename
            # Didn't find source, but it's probably the .py file we want.
            return py_filename

        elif filename.endswith("$py.class"):
            # Jython is easy to guess.
            return filename[:-9] + ".py"

        # No idea, just use the filename as-is.
        return filename
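
    # Illustrative mapping (added comment, not in the original source),
    # assuming the corresponding .py files exist on disk:
    #
    #     self._source_for_file("pkg/mod.py")        -> "pkg/mod.py"
    #     self._source_for_file("pkg/mod.pyc")       -> "pkg/mod.py" (or mod.pyw on Windows)
    #     self._source_for_file("pkg/mod$py.class")  -> "pkg/mod.py" (Jython)
    #     self._source_for_file("pkg/ext.so")        -> "pkg/ext.so" (left as-is)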

    def _name_for_module(self, module_globals, filename):
        """Get the name of the module for a set of globals and filename.

        For configurability's sake, we allow __main__ modules to be matched by
        their importable name.

        If loaded via runpy (aka -m), we can usually recover the "original"
        full dotted module name, otherwise, we resort to interpreting the
        filename to get the module's name.  In the case that the module name
        can't be determined, None is returned.

        """
        dunder_name = module_globals.get('__name__', None)

        if isinstance(dunder_name, str) and dunder_name != '__main__':
            # This is the usual case: an imported module.
            return dunder_name

        loader = module_globals.get('__loader__', None)
        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
            if hasattr(loader, attrname):
                fullname = getattr(loader, attrname)
            else:
                continue

            if isinstance(fullname, str) and fullname != '__main__':
                # Module loaded via: runpy -m
                return fullname

        # Script as first argument to Python command line.
        inspectedname = inspect.getmodulename(filename)
        if inspectedname is not None:
            return inspectedname
        else:
            return dunder_name

    def _should_trace_internal(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        disp = FileDisposition(filename)

        def nope(disp, reason):
            """Simple helper to make it easy to return NO."""
            disp.trace = False
            disp.reason = reason
            return disp

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name is
        # __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a filename")

        if filename.startswith('memory:'):
            return nope(disp, "memory isn't traceable")

        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "not a real filename")

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        plugin = None
        for plugin in self.file_tracing_plugins:
            if not plugin._coverage_enabled:
                continue

            try:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    file_tracer._coverage_plugin = plugin
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = \
                            self.file_locator.canonical_filename(
                                file_tracer.source_filename()
                            )
                    break
            except Exception:
                self._warn(
                    "Disabling plugin %r due to an exception:" % (
                        plugin._coverage_plugin_name
                    )
                )
                traceback.print_exc()
                plugin._coverage_enabled = False
                continue
        else:
            # No plugin wanted it: it's Python.
            disp.trace = True
            disp.source_filename = canonical

        if not disp.has_dynamic_filename:
            if not disp.source_filename:
                raise CoverageException(
                    "Plugin %r didn't set source_filename for %r" %
                    (plugin, disp.original_filename)
                )
            reason = self._check_include_omit_etc_internal(
                disp.source_filename, frame,
            )
            if reason:
                nope(disp, reason)

        return disp
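
    # Hedged sketch of the plugin protocol exercised above (illustration only,
    # not part of this module): a file tracing plugin overrides file_tracer()
    # and returns either None ("not my file") or a tracer object that tells
    # coverage.py which source file the execution belongs to.  MakoPlugin and
    # MakoFileTracer below are hypothetical names.
    #
    #     class MakoPlugin(CoveragePlugin):
    #         def file_tracer(self, filename):
    #             if filename.endswith(".mako"):
    #                 return MakoFileTracer(filename)
    #             return None             # let the regular Python tracer handle it
    #
    # The tracer's has_dynamic_source_filename() and source_filename() results
    # then drive how disp.source_filename is filled in above.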

    def _check_include_omit_etc_internal(self, filename, frame):
        """Check a filename against the include, omit, etc, rules.

        Returns a string or None.  String means, don't trace, and is the reason
        why.  None means no reason found to not trace.

        """
        modulename = self._name_for_module(frame.f_globals, filename)

        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match:
            if self.source_pkgs_match.match(modulename):
                if modulename in self.source_pkgs:
                    self.source_pkgs.remove(modulename)
                return None  # There's no reason to skip this file.

            if not self.source_match.match(filename):
                return "falls outside the --source trees"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(filename):
                return "is part of coverage.py"

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"

        # No reason found to skip this file.
        return None
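
    # Illustrative precedence summary (added, not in the original source):
    # source beats include, include beats the stdlib/coverage.py exclusions,
    # and omit is applied last.  With source=["/proj/pkg"] and
    # omit=["*/migrations/*"], calls would go roughly like:
    #
    #     self._check_include_omit_etc_internal("/proj/pkg/models.py", frame)
    #     # -> None (traced)
    #     self._check_include_omit_etc_internal("/proj/pkg/migrations/0001.py", frame)
    #     # -> "is inside an --omit pattern"
    #     self._check_include_omit_etc_internal("/usr/lib/python2.7/os.py", frame)
    #     # -> "falls outside the --source trees"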

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`.

        Calls `_should_trace_internal`, and returns the FileDisposition.

        """
        disp = self._should_trace_internal(filename, frame)
        if self.debug.should('trace'):
            self.debug.write(disp.debug_message())
        return disp

    def _check_include_omit_etc(self, filename, frame):
        """Check a filename against the include/omit/etc, rules, verbosely.

        Returns a boolean: True if the file should be traced, False if not.

        """
        reason = self._check_include_omit_etc_internal(filename, frame)
        if self.debug.should('trace'):
            if not reason:
                msg = "Including %r" % (filename,)
            else:
                msg = "Not including %r: %s" % (filename, reason)
            self.debug.write(msg)

        return not reason

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        if self.debug.should("pid"):
            msg = "[%d] %s" % (os.getpid(), msg)
        sys.stderr.write("Coverage.py warning: %s\n" % msg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self._init()
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self._init()
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage.

        Coverage measurement actually occurs in functions called after `start`
        is invoked.  Statements in the same scope as `start` won't be measured.

        Once you invoke `start`, you must also call `stop` eventually, or your
        process might not shut down cleanly.

        """
        self._init()
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self._auto_data:
            self.load()

        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        """Stop measuring code coverage."""
        if self._started:
            self.collector.stop()
        self._started = False

    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self._auto_data:
            self.save()

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self._init()
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        self._init()
        setattr(self.config, which + "_list", [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.

        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.

        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.

        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.

        """
        self._init()
        excl_list = getattr(self.config, which + "_list")
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + "_list")
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.

        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.

        """
        self._init()
        return getattr(self.config, which + "_list")

    def save(self):
        """Save the collected coverage data to the data file."""
        self._init()
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE:                             # pragma: debugging
                with open(_TEST_NAME_FILE) as f:
                    test_name = f.read()
                extra = "." + test_name
            data_suffix = "%s%s.%s.%06d" % (
                socket.gethostname(), extra, os.getpid(),
                random.randint(0, 999999)
                )

        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        self._init()
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)
        self.data.combine_parallel_data(aliases=aliases)
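
    # Hedged sketch (added for illustration): the [paths] section of the
    # config maps equivalent path prefixes onto one another so that data
    # collected in different checkouts or machines can be merged.  Roughly:
    #
    #     # .coveragerc
    #     # [paths]
    #     # source =
    #     #     src/
    #     #     /remote/build/src
    #
    # With that config, paths[0] ("src/") is the result and the remaining
    # entries are patterns, so "/remote/build/src/mod.py" is rewritten to
    # "src/mod.py" while the parallel data files are combined.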

    def _harvest_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        """
        self._init()
        if not self._measured:
            return

        # TODO: seems like this parallel structure is getting kinda old...
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.data.add_plugin_data(self.collector.get_plugin_data())
        self.collector.reset()

        # If there are still entries in the source_pkgs list, then we never
        # encountered those packages.
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                if pkg not in sys.modules:
                    self._warn("Module %s was never imported." % pkg)
                elif not (
                    hasattr(sys.modules[pkg], '__file__') and
                    os.path.exists(sys.modules[pkg].__file__)
                ):
                    self._warn("Module %s has no Python source." % pkg)
                else:
                    self._warn(
                        "Module %s was previously imported, "
                        "but not measured." % pkg
                    )

        # Find out if we got any data.
        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn("No data was collected.")

        # Find files that were never executed at all.
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)

                if self.omit_match and self.omit_match.match(py_file):
                    # Turns out this file was omitted, so don't pull it back
                    # in as unexecuted.
                    continue

                self.data.touch_file(py_file)

        self._measured = False

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        self._init()
        analysis = self._analyze(morf)
        return (
            analysis.filename,
            sorted(analysis.statements),
            sorted(analysis.excluded),
            sorted(analysis.missing),
            analysis.missing_formatted(),
            )
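
    # For illustration, the 5-tuple documented above might be consumed like this
    # (the file name is hypothetical; measurement is assumed to have run already):
    #
    #     fname, statements, excluded, missing, missing_fmt = cov.analysis2("mymod.py")
    #     pct = 100.0 * (len(statements) - len(missing)) / max(len(statements), 1)
    #     print("%s: %.1f%% covered; missing: %s" % (fname, pct, missing_fmt))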

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        self._harvest_data()
        if not isinstance(it, FileReporter):
            it = self._get_file_reporter(it)

        return Analysis(self, it)

    def _get_file_reporter(self, morf):
        """Get a FileReporter for a module or filename."""
        plugin = None

        if isinstance(morf, string_class):
            abs_morf = abs_file(morf)
            plugin_name = self.data.plugin_data().get(abs_morf)
            if plugin_name:
                plugin = self.plugins.get(plugin_name)

        if plugin:
            file_reporter = plugin.file_reporter(abs_morf)
            if file_reporter is None:
                raise CoverageException(
                    "Plugin %r did not provide a file reporter for %r." % (
                        plugin._coverage_plugin_name, morf
                    )
                )
        else:
            file_reporter = PythonFileReporter(morf, self)

        # The FileReporter can have a name attribute, but if it doesn't, we'll
        # supply it as the relative path to the reporter's filename.
        if not hasattr(file_reporter, "name"):
            file_reporter.name = self.file_locator.relative_filename(
                file_reporter.filename
            )

        return file_reporter

    def _get_file_reporters(self, morfs=None):
        """Get a list of FileReporters for a list of modules or filenames.

        For each module or filename in `morfs`, find a FileReporter.  Return
        the list of FileReporters.

        If `morfs` is a single module or filename, this returns a list of one
        FileReporter.  If `morfs` is empty or None, then the list of all files
        measured is used to find the FileReporters.

        """
        if not morfs:
            morfs = self.data.measured_files()

        # Be sure we have a list.
        if not isinstance(morfs, (list, tuple)):
            morfs = [morfs]

        file_reporters = []
        for morf in morfs:
            file_reporter = self._get_file_reporter(morf)
            file_reporters.append(file_reporter)

        return file_reporters

    def report(
        self, morfs=None, show_missing=True, ignore_errors=None,
        file=None,                  # pylint: disable=redefined-builtin
        omit=None, include=None, skip_covered=False,
    ):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Files that match will be
        included in the report. Files matching `omit` will not be included in
        the report.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            show_missing=show_missing, skip_covered=skip_covered,
            )
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)
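
    # A hedged example of driving the summary report above (the output file name
    # and omit pattern are illustrative):
    #
    #     with open("coverage_summary.txt", "w") as out:
    #         total = cov.report(show_missing=True, omit=["tests/*"], file=out)
    #     print("total: %.1f%% covered" % total)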

    def annotate(
        self, morfs=None, directory=None, ignore_errors=None,
        omit=None, include=None,
    ):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None, extra_css=None, title=None):
        """Generate an HTML report.

        The HTML is written to `directory`.  The file "index.html" is the
        overview starting point, with links to more detailed pages for
        individual modules.

        `extra_css` is a path to a file of other CSS to apply on the page.
        It will be copied into the HTML directory.

        `title` is a text string (not HTML) to use as the title of the HTML
        report.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            html_dir=directory, extra_css=extra_css, html_title=title,
            )
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(
        self, morfs=None, outfile=None, ignore_errors=None,
        omit=None, include=None,
    ):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to; "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
            )
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                # Ensure that the output directory is created; done here
                # because this report pre-opens the output file.
                # HTMLReport does this using the Report plumbing because
                # its task is more complex, being multiple files.
                output_dir = os.path.dirname(self.config.xml_output)
                if output_dir and not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config, self.file_locator)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)
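
    # The reporting methods above can be combined in a single driver, e.g.
    # (directory and file names are illustrative):
    #
    #     cov.html_report(directory="htmlcov", title="My project coverage")
    #     cov.xml_report(outfile="coverage.xml")   # Cobertura-compatible XML file
    #     cov.xml_report(outfile="-")              # or write the XML to stdout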

    def sys_info(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod

        self._init()
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = "unknown"

        ft_plugins = []
        for ft in self.file_tracing_plugins:
            ft_name = ft._coverage_plugin_name
            if not ft._coverage_enabled:
                ft_name += " (disabled)"
            ft_plugins.append(ft_name)

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('file_tracing_plugins', ft_plugins),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', sorted(
                ("%s = %s" % (k, v))
                for k, v in iitems(os.environ)
                if k.startswith(("COV", "PY"))
            )),
            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
            ]

        matcher_names = [
            'source_match', 'source_pkgs_match',
            'include_match', 'omit_match',
            'cover_match', 'pylib_match',
            ]

        for matcher_name in matcher_names:
            matcher = getattr(self, matcher_name)
            if matcher:
                matcher_info = matcher.info()
            else:
                matcher_info = '-none-'
            info.append((matcher_name, matcher_info))

        return info
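
A small sketch of how the diagnostic pairs returned by sys_info() might be dumped
when debugging a configuration problem; this is assumed usage, not part of the
original source:

cov = coverage()
for key, value in cov.sys_info():
    print("%20s: %r" % (key, value))
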
Ejemplo n.º 40
0
class coverage(object):

    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                 auto_data=False, timid=None, branch=None, config_file=True,
                 source=None, omit=None, include=None, debug=None,
                 debug_file=None):
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" % (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix), source=source,
            omit=omit, include=include, debug=debug)
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(
            self._should_trace, timid=self.config.timid,
            branch=self.config.branch, warn=self._warn)
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
        self.data = CoverageData(
            basename=self.config.data_file,
            collector='coverage v%s' % __version__, debug=self.debug)
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        self.cover_dir = self._canonical_dir(__file__)
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)

    def _canonical_dir(self, morf):
        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]

    def _source_for_file(self, filename):
        if not filename.endswith('.py'):
            if filename[-4:-1] == '.py':
                filename = filename[:-1]
            elif filename.endswith('$py.class'):
                filename = filename[:-9] + '.py'
        return filename

    def _should_trace_with_reason(self, filename, frame):
        if not filename:
            return (None, "empty string isn't a filename")
        if filename.startswith('<'):
            return (None, 'not a real filename')
        self._check_for_packages()
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)
        if filename.endswith('$py.class'):
            filename = filename[:-9] + '.py'
        canonical = self.file_locator.canonical_filename(filename)
        if self.source_match:
            if not self.source_match.match(canonical):
                return (None, 'falls outside the --source trees')
        elif self.include_match:
            if not self.include_match.match(canonical):
                return (None, 'falls outside the --include trees')
        else:
            if self.pylib_match and self.pylib_match.match(canonical):
                return (None, 'is in the stdlib')
            if self.cover_match and self.cover_match.match(canonical):
                return (None, 'is part of coverage.py')
        if self.omit_match and self.omit_match.match(canonical):
            return (None, 'is inside an --omit pattern')
        return (canonical, 'because we love you')

    def _should_trace(self, filename, frame):
        canonical, reason = self._should_trace_with_reason(filename, frame)
        if self.debug.should('trace'):
            if not canonical:
                msg = 'Not tracing %r: %s' % (filename, reason)
            else:
                msg = 'Tracing %r' % (filename,)
            self.debug.write(msg)
        return canonical

    def _warn(self, msg):
        self._warnings.append(msg)
        sys.stderr.write('Coverage.py warning: %s\n' % msg)

    def _check_for_packages(self):
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)
                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    pkg_file = None
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__'):
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    if not os.path.exists(pkg_file):
                        pkg_file = None

                if pkg_file:
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)
                else:
                    self._warn('Module %s has no Python source.' % pkg)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        self.data.usefile(usecache)

    def load(self):
        self.collector.reset()
        self.data.read()

    def start(self):
        if self.run_suffix:
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)
        if self.debug.should('config'):
            self.debug.write('Configuration values:')
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info(config_info)
        if self.debug.should('sys'):
            self.debug.write('Debugging info:')
            self.debug.write_formatted_info(self.sysinfo())
        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        self._started = False
        self.collector.stop()

    def _atexit(self):
        if self._started:
            self.stop()
        if self.auto_data:
            self.save()

    def erase(self):
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which = 'exclude'):
        setattr(self.config, which + '_list', [])
        self._exclude_regex_stale()

    def exclude(self, regex, which = 'exclude'):
        excl_list = getattr(self.config, which + '_list')
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + '_list')
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which = 'exclude'):
        return getattr(self.config, which + '_list')

    def save(self):
        data_suffix = self.data_suffix
        if data_suffix is True:
            extra = ''
            if _TEST_NAME_FILE:
                f = open(_TEST_NAME_FILE)
                test_name = f.read()
                f.close()
                extra = '.' + test_name
            data_suffix = '%s%s.%s.%06d' % (socket.gethostname(),
             extra,
             os.getpid(),
             random.randint(0, 999999))
        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)

        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        if not self._measured:
            return
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                self._warn('Module %s was never imported.' % pkg)

        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn('No data was collected.')
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)
                if self.omit_match and self.omit_match.match(py_file):
                    continue
                self.data.touch_file(py_file)

        self._measured = False

    def analysis(self, morf):
        f, s, _, m, mf = self.analysis2(morf)
        return (f,
         s,
         m,
         mf)

    def analysis2(self, morf):
        analysis = self._analyze(morf)
        return (analysis.filename,
         analysis.statements,
         analysis.excluded,
         analysis.missing,
         analysis.missing_formatted())

    def _analyze(self, it):
        self._harvest_data()
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]
        return Analysis(self, it)

    def report(self, morfs = None, show_missing = True, ignore_errors = None, file = None, omit = None, include = None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing)
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(self, morfs = None, directory = None, ignore_errors = None, omit = None, include = None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include)
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self, morfs = None, directory = None, ignore_errors = None, omit = None, include = None, extra_css = None, title = None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title)
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(self, morfs = None, outfile = None, ignore_errors = None, omit = None, include = None):
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile)
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, 'w')
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sysinfo(self):
        import coverage as covmod
        import platform, re
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = 'unknown'

        info = [('version', covmod.__version__),
         ('coverage', covmod.__file__),
         ('cover_dir', self.cover_dir),
         ('pylib_dirs', self.pylib_dirs),
         ('tracer', self.collector.tracer_name()),
         ('config_files', self.config.attempted_config_files),
         ('configs_read', self.config.config_files),
         ('data_path', self.data.filename),
         ('python', sys.version.replace('\n', '')),
         ('platform', platform.platform()),
         ('implementation', implementation),
         ('executable', sys.executable),
         ('cwd', os.getcwd()),
         ('path', sys.path),
         ('environment', sorted([ '%s = %s' % (k, v) for k, v in iitems(os.environ) if re.search('^COV|^PY', k) ])),
         ('command_line', ' '.join(getattr(sys, 'argv', ['???'])))]
        if self.source_match:
            info.append(('source_match', self.source_match.info()))
        if self.include_match:
            info.append(('include_match', self.include_match.info()))
        if self.omit_match:
            info.append(('omit_match', self.omit_match.info()))
        if self.cover_match:
            info.append(('cover_match', self.cover_match.info()))
        if self.pylib_match:
            info.append(('pylib_match', self.pylib_match.info()))
        return info
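
Putting the class above through a typical measure-then-report cycle might look
roughly like this; the measured package and entry point are hypothetical:

cov = coverage(branch=True, source=["myproj"])
cov.start()
import myproj.cli          # hypothetical package under measurement
myproj.cli.main([])
cov.stop()
cov.save()
cov.report(show_missing=True)
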
Ejemplo n.º 41
0
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include)

        self.auto_data = auto_data

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(basename=self.config.data_file,
                                 collector="coverage v%s" % __version__)

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, random, socket):
                if hasattr(m, "__file__"):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Only _harvest_data once per measurement cycle.
        self._harvested = False

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._started = False

        atexit.register(self._atexit)
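
A hedged construction example exercising the arguments documented in the docstring
above; the values are illustrative, not defaults:

cov = coverage(
    data_file=".coverage.myproj",   # base name of the data file
    data_suffix=True,               # save as .coverage.myproj.<machine>.<pid>.<random>
    branch=True,                    # measure branch coverage as well as statements
    source=["myproj"],              # only measure code under this tree or package
    omit=["myproj/vendor/*"],       # ...except vendored files
    config_file=False,              # do not read .coveragerc
)
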
Ejemplo n.º 42
0
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.

        """
        from coverage import __version__

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            self.config.from_file(config_file)

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include
            )

        self.auto_data = auto_data
        self.atexit_registered = False

        self.exclude_re = ""
        self._compile_exclude()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = self._abs_files(self.config.omit)
        self.include = self._abs_files(self.config.include)

        self.collector = Collector(
            self._should_trace, timid=self.config.timid,
            branch=self.config.branch
            )

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where the "os" module is located.  That's the indication
            # for "installed with the interpreter".
            os_dir = self.canonical_dir(os.__file__)
            self.pylib_dirs.append(os_dir)

            # In a virtualenv, there are actually two lib directories. Find the
            # other one.  This is kind of ad-hoc, but it works.
            random_dir = self.canonical_dir(random.__file__)
            if random_dir != os_dir:
                self.pylib_dirs.append(random_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self.canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
Ejemplo n.º 43
0
    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
                auto_data=False, timid=None, branch=None, config_file=True,
                source=None, omit=None, include=None, debug=None,
                debug_file=None, concurrency=None, plugins=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired. `debug_file` is the file to write debug messages to,
        defaulting to stderr.

        `concurrency` is a string indicating the concurrency library being used
        in the measured code.  Without this, coverage.py will get incorrect
        results when such a library is in use.  Valid strings are "greenlet",
        "eventlet", "gevent", or "thread" (the default).

        `plugins` TODO.

        """
        from coverage import __version__

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = should_read_setupcfg = False
            if config_file is True:
                config_file = ".coveragerc"
                should_read_setupcfg = True
            try:
                did_read_rc = self.config.from_file(config_file)
            except ValueError as err:
                raise CoverageException(
                    "Couldn't read config file %s: %s" % (config_file, err)
                    )

            if not did_read_rc and should_read_setupcfg:
                self.config.from_file("setup.cfg", section_prefix="coverage:")

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include, debug=debug,
            concurrency=concurrency, plugins=plugins,
            )

        # Create and configure the debugging controller.
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        self.trace_judges = []
        for plugin in self.plugins:
            if plugin_implements(plugin, "trace_judge"):
                self.trace_judges.append(plugin)
        self.trace_judges.append(None)      # The Python case.

        self.auto_data = auto_data

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._tracing_check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=self.config.concurrency,
            )

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
            )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # The matchers for _should_trace.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True

        # State machine variables:
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False

        atexit.register(self._atexit)
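
This variant adds `debug`, `debug_file`, `concurrency`, and `plugins`; a sketch of
how they might be passed (the plugin module name is hypothetical, and the plugins
format is an assumption since the docstring above leaves it as TODO):

import sys

cov = coverage(
    concurrency="gevent",                 # measured code uses gevent
    debug=["trace", "config"],            # debug options used elsewhere in this class
    debug_file=sys.stderr,                # where debug messages go (the default)
    plugins=["mypkg.coverage_plugin"],    # hypothetical plugin to load
)
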
Ejemplo n.º 44
0
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.

        """
        from coverage import __version__

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            self.config.from_file(config_file)

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include)

        self.auto_data = auto_data
        self.atexit_registered = False

        self.exclude_re = ""
        self._compile_exclude()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = self._abs_files(self.config.omit)
        self.include = self._abs_files(self.config.include)

        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch)

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(basename=self.config.data_file,
                                 collector="coverage v%s" % __version__)

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where the "os" module is located.  That's the indication
            # for "installed with the interpreter".
            os_dir = self.canonical_dir(os.__file__)
            self.pylib_dirs.append(os_dir)

            # In a virtualenv, there are actually two lib directories. Find the
            # other one.  This is kind of ad-hoc, but it works.
            random_dir = self.canonical_dir(random.__file__)
            if random_dir != os_dir:
                self.pylib_dirs.append(random_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self.canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
Ejemplo n.º 45
0
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None,
                 debug=None,
                 debug_file=None):
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include,
                              debug=debug)
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
        self.data = CoverageData(basename=self.config.data_file,
                                 collector='coverage v%s' % __version__,
                                 debug=self.debug)
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

        self.cover_dir = self._canonical_dir(__file__)
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)
Ejemplo n.º 46
0
class coverage(object):
    """Programmatic access to Coverage.

    To use::

        from coverage import coverage

        cov = coverage()
        cov.start()
        #.. blah blah (run your code) blah blah ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.

        """
        from coverage import __version__

        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the coveragerc file:
        if config_file:
            if config_file is True:
                config_file = ".coveragerc"
            self.config.from_file(config_file)

        # 3: from environment variables:
        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include)

        self.auto_data = auto_data
        self.atexit_registered = False

        self.exclude_re = ""
        self._compile_exclude()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = self._abs_files(self.config.omit)
        self.include = self._abs_files(self.config.include)

        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch)

        # Suffixes are a bit tricky.  We want to use the data suffix only when
        # collecting data, not when combining data.  So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix

        # Create the data file.  We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(basename=self.config.data_file,
                                 collector="coverage v%s" % __version__)

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            # Look at where the "os" module is located.  That's the indication
            # for "installed with the interpreter".
            os_dir = self.canonical_dir(os.__file__)
            self.pylib_dirs.append(os_dir)

            # In a virtualenv, there are actually two lib directories. Find the
            # other one.  This is kind of ad-hoc, but it works.
            random_dir = self.canonical_dir(random.__file__)
            if random_dir != os_dir:
                self.pylib_dirs.append(random_dir)

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self.canonical_dir(__file__)

        # The matchers for _should_trace, created when tracing starts.
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

    def canonical_dir(self, f):
        """Return the canonical directory of the file `f`."""
        return os.path.split(self.file_locator.canonical_filename(f))[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`."""
        if not filename.endswith(".py"):
            if filename[-4:-1] == ".py":
                filename = filename[:-1]
        return filename

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`

        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a canonicalized filename if it should be traced, False if it
        should not.

        """
        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>".  Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return False

        self._check_for_packages()

        # Compiled Python files have two filenames: frame.f_code.co_filename is
        # the filename at the time the .pyc was compiled.  The second name
        # is __file__, which is where the .pyc was actually loaded from.  Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)
        canonical = self.file_locator.canonical_filename(filename)

        # If the user specified source, then that's authoritative about what to
        # measure.  If they didn't, then we have to exclude the stdlib and
        # coverage.py directories.
        if self.source_match:
            if not self.source_match.match(canonical):
                return False
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(canonical):
                return False

            # We exclude the coverage code itself, since a little of it will be
            # measured otherwise.
            if self.cover_match and self.cover_match.match(canonical):
                return False

        # Check the file against the include and omit patterns.
        if self.include_match and not self.include_match.match(canonical):
            return False
        if self.omit_match and self.omit_match.match(canonical):
            return False

        return canonical

    # To log what should_trace returns, change this to "if 1:"
    if 0:
        _real_should_trace = _should_trace

        def _should_trace(self, filename, frame):  # pylint: disable-msg=E0102
            """A logging decorator around the real _should_trace function."""
            ret = self._real_should_trace(filename, frame)
            print("should_trace: %r -> %r" % (filename, ret))
            return ret

    def _warn(self, msg):
        """Use `msg` as a warning."""
        sys.stderr.write("Warning: " + msg + "\n")

    def _abs_files(self, files):
        """Return a list of absolute file names for the names in `files`."""
        files = files or []
        return [self.file_locator.abs_file(f) for f in files]

    def _check_for_packages(self):
        """Update the source_match matcher with latest imported packages."""
        # Our self.source_pkgs attribute is a list of package names we want to
        # measure.  Each time through here, we see if we've imported any of
        # them yet.  If so, we add its file to source_match, and we don't have
        # to look for that package any more.
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)

                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    self._warn("Module %s has no python source." % pkg)
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__.'):
                        # This is actually a package, return the directory.
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    self.source_match.add(pkg_file)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).

        `usecache` is true or false, whether to read and write data on disk.

        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage."""
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()
            # Save coverage data when Python exits.
            if not self.atexit_registered:
                atexit.register(self.save)
                self.atexit_registered = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        self.collector.start()

    def stop(self):
        """Stop measuring code coverage."""
        self.collector.stop()

        # If there are still entries in the source_pkgs list, then we never
        # encountered those packages.
        for pkg in self.source_pkgs:
            self._warn("Source module %s was never encountered." % pkg)

        self._harvest_data()

        # Find out if we got any data.
        summary = self.data.summary()
        if not summary:
            self._warn("No data was collected.")

    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.

        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self):
        """Clear the exclude list."""
        self.config.exclude_list = []
        self.exclude_re = ""

    def exclude(self, regex):
        """Exclude source lines from execution consideration.

        `regex` is a regular expression.  Lines matching this expression are
        not considered executable when reporting code coverage.  A list of
        regexes is maintained; this function adds a new regex to the list.
        Matching any of the regexes excludes a source line.

        """
        self.config.exclude_list.append(regex)
        self._compile_exclude()

    def _compile_exclude(self):
        """Build the internal usable form of the exclude list."""
        self.exclude_re = "(" + ")|(".join(self.config.exclude_list) + ")"

    def get_exclude_list(self):
        """Return the list of excluded regex patterns."""
        return self.config.exclude_list

    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix and not isinstance(data_suffix, string_class):
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information.  We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            data_suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(),
                                          random.randint(0, 99999))

        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.

        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.

        """
        self.data.combine_parallel_data()

    def _harvest_data(self):
        """Get the collected data and reset the collector."""
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        analysis = self._analyze(morf)
        return (analysis.filename, analysis.statements, analysis.excluded,
                analysis.missing, analysis.missing_formatted())

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]

        return Analysis(self, it)

    def report(
            self,
            morfs=None,
            show_missing=True,
            ignore_errors=None,
            file=None,  # pylint: disable-msg=W0622
            omit=None,
            include=None):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        """
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = SummaryReporter(self, show_missing,
                                   self.config.ignore_errors)
        reporter.report(morfs,
                        outfile=file,
                        omit=self.config.omit,
                        include=self.config.include)

    def annotate(self,
                 morfs=None,
                 directory=None,
                 ignore_errors=None,
                 omit=None,
                 include=None):
        """Annotate a list of modules.

        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = AnnotateReporter(self, self.config.ignore_errors)
        reporter.report(morfs,
                        directory=directory,
                        omit=self.config.omit,
                        include=self.config.include)

    def html_report(self,
                    morfs=None,
                    directory=None,
                    ignore_errors=None,
                    omit=None,
                    include=None):
        """Generate an HTML report.

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            html_dir=directory,
        )
        reporter = HtmlReporter(self, self.config.ignore_errors)
        reporter.report(morfs,
                        directory=self.config.html_dir,
                        omit=self.config.omit,
                        include=self.config.include)

    def xml_report(self,
                   morfs=None,
                   outfile=None,
                   ignore_errors=None,
                   omit=None,
                   include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        """
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            xml_output=outfile,
        )
        file_to_close = None
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config.ignore_errors)
            reporter.report(morfs,
                            omit=self.config.omit,
                            include=self.config.include,
                            outfile=outfile)
        finally:
            if file_to_close:
                file_to_close.close()

    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod
        import platform, re

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', [("%s = %s" % (k, v))
                             for k, v in os.environ.items()
                             if re.search("^COV|^PY", k)]),
        ]
        return info
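
# --- Illustration (not from coverage.py) -------------------------------------
# A minimal usage sketch for the class above, assuming the usual
# `from coverage import coverage` entry point.  `fizzbuzz` is only a local toy
# function so that there is something to measure.
from coverage import coverage

def fizzbuzz(n):
    out = []
    for i in range(1, n + 1):
        if i % 15 == 0:
            out.append("fizzbuzz")
        elif i % 3 == 0:
            out.append("fizz")
        elif i % 5 == 0:
            out.append("buzz")
        else:
            out.append(str(i))
    return out

cov = coverage(branch=True, omit=["*/tests/*"])
cov.start()
fizzbuzz(20)                    # only code run between start() and stop() is measured
cov.stop()
cov.save()                      # writes the .coverage data file
cov.report(show_missing=True)   # text summary, including missing line numbers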
Ejemplo n.º 47
0
class coverage(object):
    """Programmatic access to coverage.py.
    
    To use::
    
        from coverage import coverage
    
        cov = coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')
    
    """
    def __init__(self,
                 data_file=None,
                 data_suffix=None,
                 cover_pylib=None,
                 auto_data=False,
                 timid=None,
                 branch=None,
                 config_file=True,
                 source=None,
                 omit=None,
                 include=None,
                 debug=None,
                 debug_file=None):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.
        
        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.
        
        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.
        
        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.
        
        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.
        
        `config_file` determines what config file to read.  If it is a string,
        it is the name of the config file to read.  If it is True, then a
        standard file is read (".coveragerc").  If it is False, then no file is
        read.
        
        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.
        
        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not.  Each
        will also accept a single string argument.
        
        `debug` is a list of strings indicating what debugging information is
        desired. `debug_file` is the file to write debug messages to,
        defaulting to stderr.
        
        """
        from coverage import __version__
        self._warnings = []
        self.config = CoverageConfig()
        if config_file:
            if config_file is True:
                config_file = '.coveragerc'
            try:
                self.config.from_file(config_file)
            except ValueError:
                _, err, _ = sys.exc_info()
                raise CoverageException("Couldn't read config file %s: %s" %
                                        (config_file, err))

        self.config.from_environment('COVERAGE_OPTIONS')
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        self.config.from_args(data_file=data_file,
                              cover_pylib=cover_pylib,
                              timid=timid,
                              branch=branch,
                              parallel=bool_or_none(data_suffix),
                              source=source,
                              omit=omit,
                              include=include,
                              debug=debug)
        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
        self.auto_data = auto_data
        self._exclude_re = {}
        self._exclude_regex_stale()
        self.file_locator = FileLocator()
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        self.collector = Collector(self._should_trace,
                                   timid=self.config.timid,
                                   branch=self.config.branch,
                                   warn=self._warn)
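        # Suffixes are a bit tricky: keep the requested suffix as run_suffix
        # for now; start() promotes it to data_suffix once we know we are
        # actually collecting data rather than combining it.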
        if data_suffix or self.config.parallel:
            if not isinstance(data_suffix, string_class):
                data_suffix = True
        else:
            data_suffix = None
        self.data_suffix = None
        self.run_suffix = data_suffix
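        # Create the data object at construction time so the data file lands
        # in the directory where the process started, not wherever it later
        # chdir's to.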
        self.data = CoverageData(basename=self.config.data_file,
                                 collector='coverage v%s' % __version__,
                                 debug=self.debug)
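        # The directories of files considered "installed with the
        # interpreter", sampled from a handful of stdlib modules.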
        self.pylib_dirs = []
        if not self.config.cover_pylib:
            for m in (atexit, os, random, socket, _structseq):
                if m is not None and hasattr(m, '__file__'):
                    m_dir = self._canonical_dir(m)
                    if m_dir not in self.pylib_dirs:
                        self.pylib_dirs.append(m_dir)

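        # To avoid tracing the coverage code itself, skip anything located
        # where this module lives.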
        self.cover_dir = self._canonical_dir(__file__)
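        # The matchers for _should_trace; created when tracing starts.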
        self.source_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        Numbers.set_precision(self.config.precision)
        self._warn_no_data = True
        self._warn_unimported_source = True
        self._started = False
        self._measured = False
        atexit.register(self._atexit)

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`."""
        if not filename.endswith('.py'):
            if filename[-4:-1] == '.py':
                filename = filename[:-1]
            elif filename.endswith('$py.class'):
                filename = filename[:-9] + '.py'
        return filename

    def _should_trace_with_reason(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.
        
        This function is called from the trace function.  As each new file name
        is encountered, this function determines whether it is traced or not.
        
        Returns a pair of values: the first indicates whether the file should
        be traced, and is a canonicalized filename if it should be traced or
        None if it should not.  The second value is a string, the reason for
        the decision.
        
        """
        if not filename:
            return (None, "empty string isn't a filename")
        if filename.startswith('<'):
            return (None, 'not a real filename')
        self._check_for_packages()
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)
        if filename.endswith('$py.class'):
            filename = filename[:-9] + '.py'
        canonical = self.file_locator.canonical_filename(filename)
        if self.source_match:
            if not self.source_match.match(canonical):
                return (None, 'falls outside the --source trees')
        elif self.include_match:
            if not self.include_match.match(canonical):
                return (None, 'falls outside the --include trees')
        else:
            if self.pylib_match and self.pylib_match.match(canonical):
                return (None, 'is in the stdlib')
            if self.cover_match and self.cover_match.match(canonical):
                return (None, 'is part of coverage.py')
        if self.omit_match and self.omit_match.match(canonical):
            return (None, 'is inside an --omit pattern')
        return (canonical, 'because we love you')

    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`.
        
        Calls `_should_trace_with_reason`, and returns just the decision.
        
        """
        canonical, reason = self._should_trace_with_reason(filename, frame)
        if self.debug.should('trace'):
            if not canonical:
                msg = 'Not tracing %r: %s' % (filename, reason)
            else:
                msg = 'Tracing %r' % (filename, )
            self.debug.write(msg)
        return canonical

    def _warn(self, msg):
        """Use `msg` as a warning."""
        self._warnings.append(msg)
        sys.stderr.write('Coverage.py warning: %s\n' % msg)

    def _check_for_packages(self):
        """Update the source_match matcher with latest imported packages."""
        if self.source_pkgs:
            found = []
            for pkg in self.source_pkgs:
                try:
                    mod = sys.modules[pkg]
                except KeyError:
                    continue

                found.append(pkg)
                try:
                    pkg_file = mod.__file__
                except AttributeError:
                    pkg_file = None
                else:
                    d, f = os.path.split(pkg_file)
                    if f.startswith('__init__'):
                        pkg_file = d
                    else:
                        pkg_file = self._source_for_file(pkg_file)
                    pkg_file = self.file_locator.canonical_filename(pkg_file)
                    if not os.path.exists(pkg_file):
                        pkg_file = None

                if pkg_file:
                    self.source.append(pkg_file)
                    self.source_match.add(pkg_file)
                else:
                    self._warn('Module %s has no Python source.' % pkg)

            for pkg in found:
                self.source_pkgs.remove(pkg)

    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).
        
        `usecache` is true or false, whether to read and write data on disk.
        
        """
        self.data.usefile(usecache)

    def load(self):
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()

    def start(self):
        """Start measuring code coverage.
        
        Coverage measurement actually occurs in functions called after `start`
        is invoked.  Statements in the same scope as `start` won't be measured.
        
        Once you invoke `start`, you must also call `stop` eventually, or your
        process might not shut down cleanly.
        
        """
        if self.run_suffix:
            self.data_suffix = self.run_suffix
        if self.auto_data:
            self.load()
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)
        if self.debug.should('config'):
            self.debug.write('Configuration values:')
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info(config_info)
        if self.debug.should('sys'):
            self.debug.write('Debugging info:')
            self.debug.write_formatted_info(self.sysinfo())
        self.collector.start()
        self._started = True
        self._measured = True

    def stop(self):
        """Stop measuring code coverage."""
        self._started = False
        self.collector.stop()

    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self.auto_data:
            self.save()

    def erase(self):
        """Erase previously-collected coverage data.
        
        This removes the in-memory data collected in this session as well as
        discarding the data file.
        
        """
        self.collector.reset()
        self.data.erase()

    def clear_exclude(self, which='exclude'):
        """Clear the exclude list."""
        setattr(self.config, which + '_list', [])
        self._exclude_regex_stale()

    def exclude(self, regex, which='exclude'):
        """Exclude source lines from execution consideration.
        
        A number of lists of regular expressions are maintained.  Each list
        selects lines that are treated differently during reporting.
        
        `which` determines which list is modified.  The "exclude" list selects
        lines that are not considered executable at all.  The "partial" list
        indicates lines with branches that are not taken.
        
        `regex` is a regular expression.  The regex is added to the specified
        list.  If any of the regexes in the list is found in a line, the line
        is marked for special treatment during reporting.
        
        """
        excl_list = getattr(self.config, which + '_list')
        excl_list.append(regex)
        self._exclude_regex_stale()

    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        self._exclude_re.clear()

    def _exclude_regex(self, which):
        """Return a compiled regex for the given exclusion list."""
        if which not in self._exclude_re:
            excl_list = getattr(self.config, which + '_list')
            self._exclude_re[which] = join_regex(excl_list)
        return self._exclude_re[which]

    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.
        
        `which` indicates which list is desired.  See `exclude` for the lists
        that are available, and their meaning.
        
        """
        return getattr(self.config, which + '_list')

    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix is True:
            extra = ''
            if _TEST_NAME_FILE:
                f = open(_TEST_NAME_FILE)
                test_name = f.read()
                f.close()
                extra = '.' + test_name
            data_suffix = '%s%s.%s.%06d' % (socket.gethostname(), extra,
                                            os.getpid(),
                                            random.randint(0, 999999))
        self._harvest_data()
        self.data.write(suffix=data_suffix)

    def combine(self):
        """Combine together a number of similarly-named coverage data files.
        
        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.
        
        """
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)

        self.data.combine_parallel_data(aliases=aliases)

    def _harvest_data(self):
        """Get the collected data and reset the collector.
        
        Also warn about various problems collecting data.
        
        """
        if not self._measured:
            return
        self.data.add_line_data(self.collector.get_line_data())
        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                self._warn('Module %s was never imported.' % pkg)

        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn('No data was collected.')
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)
                if self.omit_match and self.omit_match.match(py_file):
                    continue
                self.data.touch_file(py_file)

        self._measured = False

    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return (f, s, m, mf)

    def analysis2(self, morf):
        """Analyze a module.
        
        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:
        
        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.
        
        The analysis uses the source file itself and the current measured
        coverage data.
        
        """
        analysis = self._analyze(morf)
        return (analysis.filename, analysis.statements, analysis.excluded,
                analysis.missing, analysis.missing_formatted())

    def _analyze(self, it):
        """Analyze a single morf or code unit.
        
        Returns an `Analysis` object.
        
        """
        self._harvest_data()
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]
        return Analysis(self, it)

    def report(self,
               morfs=None,
               show_missing=True,
               ignore_errors=None,
               file=None,
               omit=None,
               include=None):
        """Write a summary report to `file`.
        
        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.
        
        `include` is a list of filename patterns.  Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.
        
        Returns a float, the total percentage covered.
        
        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              show_missing=show_missing)
        reporter = SummaryReporter(self, self.config)
        return reporter.report(morfs, outfile=file)

    def annotate(self,
                 morfs=None,
                 directory=None,
                 ignore_errors=None,
                 omit=None,
                 include=None):
        """Annotate a list of modules.
        
        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".
        
        See `coverage.report()` for other arguments.
        
        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include)
        reporter = AnnotateReporter(self, self.config)
        reporter.report(morfs, directory=directory)

    def html_report(self,
                    morfs=None,
                    directory=None,
                    ignore_errors=None,
                    omit=None,
                    include=None,
                    extra_css=None,
                    title=None):
        """Generate an HTML report.
        
        The HTML is written to `directory`.  The file "index.html" is the
        overview starting point, with links to more detailed pages for
        individual modules.
        
        `extra_css` is a path to a file of other CSS to apply on the page.
        It will be copied into the HTML directory.
        
        `title` is a text string (not HTML) to use as the title of the HTML
        report.
        
        See `coverage.report()` for other arguments.
        
        Returns a float, the total percentage covered.
        
        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              html_dir=directory,
                              extra_css=extra_css,
                              html_title=title)
        reporter = HtmlReporter(self, self.config)
        return reporter.report(morfs)

    def xml_report(self,
                   morfs=None,
                   outfile=None,
                   ignore_errors=None,
                   omit=None,
                   include=None):
        """Generate an XML report of coverage results.
        
        The report is compatible with Cobertura reports.
        
        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.
        
        See `coverage.report()` for other arguments.
        
        Returns a float, the total percentage covered.
        
        """
        self._harvest_data()
        self.config.from_args(ignore_errors=ignore_errors,
                              omit=omit,
                              include=include,
                              xml_output=outfile)
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, 'w')
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""
        import coverage as covmod
        import platform, re
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = 'unknown'

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', sorted(
                '%s = %s' % (k, v)
                for k, v in iitems(os.environ)
                if re.search('^COV|^PY', k)
            )),
            ('command_line', ' '.join(getattr(sys, 'argv', ['???']))),
        ]
        if self.source_match:
            info.append(('source_match', self.source_match.info()))
        if self.include_match:
            info.append(('include_match', self.include_match.info()))
        if self.omit_match:
            info.append(('omit_match', self.omit_match.info()))
        if self.cover_match:
            info.append(('cover_match', self.cover_match.info()))
        if self.pylib_match:
            info.append(('pylib_match', self.pylib_match.info()))
        return info
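
# --- Illustration (not from coverage.py) -------------------------------------
# A hedged sketch of the features this later version adds over the class in
# Ejemplo n.º 46: a `debug` option and per-process data files merged with
# combine().  The measured expression and the report directory are
# illustrative only.
from coverage import coverage

cov = coverage(data_suffix=True,   # each save() writes .coverage.<host>.<pid>.<random>
               branch=True,
               debug=['trace'])    # log every should-trace decision to stderr
cov.start()
sum(i * i for i in range(100))     # stand-in for the real workload
cov.stop()
cov.save()

merged = coverage()                # a fresh object to merge the per-process files
merged.combine()                   # folds the .coverage.* files into one data set
merged.html_report(directory='covhtml', title='Combined run')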
Ejemplo n.º 48
0
 def setUp(self):
     super(MatcherTest, self).setUp()
     self.fl = FileLocator()