    def test_no_arcs_vs_unmeasured_file(self):
        covdata = CoverageData()
        covdata.set_arcs(ARCS_3)
        covdata.touch_file('zzz.py')
        self.assertEqual(covdata.lines('zzz.py'), [])
        self.assertIsNone(covdata.lines('no_such_file.py'))
        self.assertEqual(covdata.arcs('zzz.py'), [])
        self.assertIsNone(covdata.arcs('no_such_file.py'))
    def test_touch_file_with_lines(self):
        covdata = CoverageData()
        covdata.add_lines(LINES_1)
        covdata.touch_file('zzz.py')
        self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])
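
    # The tests above rely on fixture data (LINES_1, ARCS_3, MEASURED_FILES_1)
    # defined elsewhere in this test module. As a rough illustrative sketch of
    # the distinction being asserted (the data values here are hypothetical,
    # not the real fixtures):
    #
    #     covdata = CoverageData()
    #     covdata.touch_file('zzz.py')      # measured, but nothing recorded
    #     covdata.lines('zzz.py')           # --> []   (measured, no lines)
    #     covdata.lines('no_such_file.py')  # --> None (never measured at all)
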
class Coverage(object):
    """Programmatic access to coverage.py.

    To use::

        from coverage import Coverage

        cov = Coverage()
        cov.start()
        #.. call your code ..
        cov.stop()
        cov.html_report(directory='covhtml')

    """
    def __init__(
        self,
        data_file=None, data_suffix=None, cover_pylib=None,
        auto_data=False, timid=None, branch=None, config_file=True,
        source=None, omit=None, include=None, debug=None,
        concurrency=None,
    ):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage". `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name. If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured. This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used. This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what config file to read. If it is a string,
        it is the name of the config file to read. If it is True, then a
        standard file is read (".coveragerc"). If it is False, then no file is
        read.

        `source` is a list of file paths or package names. Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of filename patterns. Files that match
        `include` will be measured, files that match `omit` will not. Each
        will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired.

        `concurrency` is a string indicating the concurrency library being
        used in the measured code. Without this, coverage.py will get
        incorrect results. Valid strings are "greenlet", "eventlet", "gevent",
        or "thread" (the default).

        """
        # Build our configuration from a number of sources:
        # 1: defaults:
        self.config = CoverageConfig()

        # 2: from the rcfile, .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = False
            specified_file = (config_file is not True)
            if not specified_file:
                config_file = ".coveragerc"

            did_read_rc = self.config.from_file(config_file)

            if not did_read_rc:
                if specified_file:
                    raise CoverageException(
                        "Couldn't read '%s' as a config file" % config_file
                    )
                self.config.from_file("setup.cfg", section_prefix="coverage:")

        # 3: from environment variables:
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file

        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include, debug=debug,
            concurrency=concurrency,
        )

        self._debug_file = None
        self._auto_data = auto_data
        self._data_suffix = data_suffix

        # The matchers for _should_trace.
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None

        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True

        # A record of all the warnings that have been issued.
        self._warnings = []

        # Other instance attributes, set later.
        self.omit = self.include = self.source = None
        self.source_pkgs = self.file_locator = None
        self.data = self.collector = None
        self.plugins = self.file_tracers = None
        self.pylib_dirs = self.cover_dir = None
        self.data_suffix = self.run_suffix = None
        self._exclude_re = None
        self.debug = None

        # State machine variables:
        # Have we initialized everything?
        self._inited = False
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False

    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state. This lets us
        construct a Coverage object, then tweak its state before this function
        is called.

        """
        from coverage import __version__

        if self._inited:
            return

        # Create and configure the debugging controller.
        if self._debug_file is None:
            self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)

        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config)

        # TEMPORARY, because the plugin support is implemented in PyTracer.
        # This will be removed when that support is moved into CTracer.
        if self.plugins:
            self._warn("Setting timid=True to support plugins.")
            self.config.timid = True

        self.file_tracers = []
        for plugin in self.plugins:
            if overrides(plugin, "file_tracer", CoveragePlugin):
                self.file_tracers.append(plugin)
        self.file_tracers.append(None)      # The Python case.

        # _exclude_re is a dict mapping exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()

        self.file_locator = FileLocator()

        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(self.file_locator.canonical_filename(src))
            else:
                self.source_pkgs.append(src)

        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)

        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=self.config.concurrency,
        )

        # Suffixes are a bit tricky. We want to use the data suffix only when
        # collecting data, not when combining data. So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix

        # Create the data file. We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(
            basename=self.config.data_file,
            collector="coverage v%s" % __version__,
            debug=self.debug,
        )

        # The dirs for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, os, platform, random, socket, _structseq):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the filenames. So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))

        # To avoid tracing the coverage code itself, we skip anything located
        # where we are.
        self.cover_dir = self._canonical_dir(__file__)

        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)

        atexit.register(self._atexit)

        self._inited = True

        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dir:
                self.cover_match = TreeMatcher([self.cover_dir])
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)

        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True

        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin.plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True

        if wrote_any:
            self.debug.write_formatted_info("end", ())

    def _canonical_dir(self, morf):
        """Return the canonical directory of the module or file `morf`."""
        morf_filename = PythonCodeUnit(morf, self).filename
        return os.path.split(morf_filename)[0]

    def _source_for_file(self, filename):
        """Return the source file for `filename`.

        Given a filename being traced, return the best guess as to the source
        file to attribute it to.

        """
        if filename.endswith(".py"):
            # .py files are themselves source files.
            return filename

        elif filename.endswith((".pyc", ".pyo")):
            # Bytecode files probably have source files near them.
            py_filename = filename[:-1]
            if os.path.exists(py_filename):
                # Found a .py file, use that.
                return py_filename
            if env.WINDOWS:
                # On Windows, it could be a .pyw file.
                pyw_filename = py_filename + "w"
                if os.path.exists(pyw_filename):
                    return pyw_filename
            # Didn't find source, but it's probably the .py file we want.
            return py_filename

        elif filename.endswith("$py.class"):
            # Jython is easy to guess.
            return filename[:-9] + ".py"

        # No idea, just use the filename as-is.
        return filename

    def _name_for_module(self, module_globals, filename):
        """Get the name of the module for a set of globals and filename.

        For configurability's sake, we allow __main__ modules to be matched by
        their importable name.

        If loaded via runpy (aka -m), we can usually recover the "original"
        full dotted module name, otherwise, we resort to interpreting the
        filename to get the module's name. In the case that the module name
        can't be determined, None is returned.

        """
        dunder_name = module_globals.get('__name__', None)

        if isinstance(dunder_name, str) and dunder_name != '__main__':
            # This is the usual case: an imported module.
            return dunder_name

        loader = module_globals.get('__loader__', None)
        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
            if hasattr(loader, attrname):
                fullname = getattr(loader, attrname)
            else:
                continue

            if isinstance(fullname, str) and fullname != '__main__':
                # Module loaded via: runpy -m
                return fullname

        # Script as first argument to Python command line.
        inspectedname = inspect.getmodulename(filename)
        if inspectedname is not None:
            return inspectedname
        else:
            return dunder_name

    def _should_trace_internal(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function. As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        disp = FileDisposition(filename)

        def nope(disp, reason):
            """Simple helper to make it easy to return NO."""
            disp.trace = False
            disp.reason = reason
            return disp

        # Compiled Python files have two filenames: frame.f_code.co_filename
        # is the filename at the time the .pyc was compiled. The second name
        # is __file__, which is where the .pyc was actually loaded from. Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)

        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a filename")

        if filename.startswith('memory:'):
            return nope(disp, "memory isn't traceable")

        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # filenames like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>". Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "not a real filename")

        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"

        canonical = self.file_locator.canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        for plugin in self.file_tracers:
            if plugin:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    file_tracer.plugin_name = plugin.plugin_name
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = self.file_locator.canonical_filename(
                            file_tracer.source_filename()
                        )
            else:
                disp.trace = True
                disp.source_filename = canonical
                file_tracer = None

            if disp.trace:
                if file_tracer:
                    disp.file_tracer = file_tracer
                if not disp.has_dynamic_filename:
                    if disp.source_filename is None:
                        raise CoverageException(
                            "Plugin %r didn't set source_filename for %r" % (
                                plugin, disp.original_filename
                            )
                        )
                    if disp.check_filters:
                        reason = self._check_include_omit_etc_internal(
                            disp.source_filename, frame,
                        )
                        if reason:
                            nope(disp, reason)

                return disp

        return nope(disp, "no plugin found")  # TODO: a test that causes this.

    def _check_include_omit_etc_internal(self, filename, frame):
        """Check a filename against the include, omit, etc, rules.

        Returns a string or None. String means, don't trace, and is the
        reason why. None means no reason found to not trace.
""" modulename = self._name_for_module(frame.f_globals, filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: if self.source_pkgs_match.match(modulename): if modulename in self.source_pkgs: self.source_pkgs.remove(modulename) return None # There's no reason to skip this file. if not self.source_match.match(filename): return "falls outside the --source trees" elif self.include_match: if not self.include_match.match(filename): return "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(filename): return "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(filename): return "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(filename): return "is inside an --omit pattern" # No reason found to skip this file. return None def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. """ disp = self._should_trace_internal(filename, frame) if self.debug.should('trace'): self.debug.write(disp.debug_message()) return disp def _check_include_omit_etc(self, filename, frame): """Check a filename against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. """ reason = self._check_include_omit_etc_internal(filename, frame) if self.debug.should('trace'): if not reason: msg = "Tracing %r" % (filename,) else: msg = "Not tracing %r: %s" % (filename, reason) self.debug.write(msg) return not reason def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self._init() self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self._init() self.collector.reset() self.data.read() def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. """ self._init() if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self._auto_data: self.load() self.collector.start() self._started = True self._measured = True def stop(self): """Stop measuring code coverage.""" if self._started: self.collector.stop() self._started = False def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self._auto_data: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. 
""" self._init() self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" self._init() data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. extra = "" if _TEST_NAME_FILE: # pragma: debugging with open(_TEST_NAME_FILE) as f: test_name = f.read() extra = "." + test_name data_suffix = "%s%s.%s.%06d" % ( socket.gethostname(), extra, os.getpid(), random.randint(0, 999999) ) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ self._init() aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ self._init() if not self._measured: return # TODO: seems like this parallel structure is getting kinda old... self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.data.add_plugin_data(self.collector.get_plugin_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: if pkg not in sys.modules: self._warn("Module %s was never imported." % pkg) elif not ( hasattr(sys.modules[pkg], '__file__') and os.path.exists(sys.modules[pkg].__file__) ): self._warn("Module %s has no Python source." 
                else:
                    raise AssertionError(
                        "Unexpected third case: name = %s, "
                        "object = %r, "
                        "__file__ = %s" % (
                            pkg, sys.modules[pkg], sys.modules[pkg].__file__
                        )
                    )

        # Find out if we got any data.
        summary = self.data.summary()
        if not summary and self._warn_no_data:
            self._warn("No data was collected.")

        # Find files that were never executed at all.
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = self.file_locator.canonical_filename(py_file)

                if self.omit_match and self.omit_match.match(py_file):
                    # Turns out this file was omitted, so don't pull it back
                    # in as unexecuted.
                    continue

                self.data.touch_file(py_file)

        self._measured = False

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Like `analysis2` but doesn't return excluded line numbers."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze a module.

        `morf` is a module or a filename. It will be analyzed to determine
        its coverage statistics. The return value is a 5-tuple:

        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.

        The analysis uses the source file itself and the current measured
        coverage data.

        """
        self._init()
        analysis = self._analyze(morf)
        return (
            analysis.filename,
            sorted(analysis.statements),
            sorted(analysis.excluded),
            sorted(analysis.missing),
            analysis.missing_formatted(),
        )

    def _analyze(self, it):
        """Analyze a single morf or code unit.

        Returns an `Analysis` object.

        """
        self._harvest_data()
        if not isinstance(it, FileReporter):
            it = self._get_file_reporter(it)

        return Analysis(self, it)

    def _get_file_reporter(self, morf):
        """Get a FileReporter for a module or filename."""
        plugin = None

        if isinstance(morf, string_class):
            plugin_name = self.data.plugin_data().get(morf)
            if plugin_name:
                plugin = self.plugins.get(plugin_name)

        if plugin:
            file_reporter = plugin.file_reporter(morf)
            if file_reporter is None:
                raise CoverageException(
                    "Plugin %r did not provide a file reporter for %r." % (
                        plugin.plugin_name, morf
                    )
                )
        else:
            file_reporter = PythonCodeUnit(morf, self)

        return file_reporter

    def _get_file_reporters(self, morfs=None):
        """Get a list of FileReporters for a list of modules or filenames.

        For each module or filename in `morfs`, find a FileReporter. Return
        the list of FileReporters.

        If `morfs` is a single module or filename, this returns a list of one
        FileReporter. If `morfs` is empty or None, then the list of all files
        measured is used to find the FileReporters.

        """
        if not morfs:
            morfs = self.data.measured_files()

        # Be sure we have a list.
        if not isinstance(morfs, (list, tuple)):
            morfs = [morfs]

        file_reporters = []
        for morf in morfs:
            file_reporter = self._get_file_reporter(morf)
            file_reporters.append(file_reporter)

        return file_reporters

    def report(
        self, morfs=None, show_missing=True, ignore_errors=None,
        file=None,                      # pylint: disable=redefined-builtin
        omit=None, include=None, skip_covered=False,
    ):
        """Write a summary report to `file`.

        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.

        `include` is a list of filename patterns. Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.

        Returns a float, the total percentage covered.
""" self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate( self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report( self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: # Ensure that the output directory is created; done here # because this report pre-opens the output file. # HTMLReport does this using the Report plumbing because # its task is more complex, being multiple files. 
                output_dir = os.path.dirname(self.config.xml_output)
                if output_dir and not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)

    def sys_info(self):
        """Return a list of (key, value) pairs showing internal information."""

        import coverage as covmod

        self._init()
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            implementation = "unknown"

        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            ('environment', sorted(
                ("%s = %s" % (k, v))
                for k, v in iitems(os.environ)
                if k.startswith(("COV", "PY"))
            )),
            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
        ]

        matcher_names = [
            'source_match', 'source_pkgs_match',
            'include_match', 'omit_match',
            'cover_match', 'pylib_match',
        ]

        for matcher_name in matcher_names:
            matcher = getattr(self, matcher_name)
            if matcher:
                matcher_info = matcher.info()
            else:
                matcher_info = '-none-'
            info.append((matcher_name, matcher_info))

        return info
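
# A minimal usage sketch for the Coverage class above, built only from its
# documented public API (start/stop/save/report/html_report). The module name
# "my_program" and its main() function are hypothetical placeholders:
#
#     from coverage import Coverage
#
#     cov = Coverage(branch=True, source=["my_program"])
#     cov.start()
#     import my_program
#     my_program.main()
#     cov.stop()
#     cov.save()
#     total = cov.report(show_missing=True)   # returns total percent covered
#     cov.html_report(directory='covhtml')
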
class coverage(object): """Programmatic access to coverage.py. To use:: from coverage import coverage cov = coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, debug_file=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what config file to read. If it is a string, it is the name of the config file to read. If it is True, then a standard file is read (".coveragerc"). If it is False, then no file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of filename patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `debug_file` is the file to write debug messages to, defaulting to stderr. 
""" from coverage import __version__ self._warnings = [] self.config = CoverageConfig() if config_file: if config_file is True: config_file = '.coveragerc' try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException("Couldn't read config file %s: %s" % (config_file, err)) self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file self.config.from_args(data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug) self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) self.auto_data = auto_data self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn) if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix self.data = CoverageData(basename=self.config.data_file, collector='coverage v%s' % __version__, debug=self.debug) self.pylib_dirs = [] if not self.config.cover_pylib: for m in (atexit, os, random, socket, _structseq): if m is not None and hasattr(m, '__file__'): m_dir = self._canonical_dir(m) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) self.cover_dir = self._canonical_dir(__file__) self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None Numbers.set_precision(self.config.precision) self._warn_no_data = True self._warn_unimported_source = True self._started = False self._measured = False atexit.register(self._atexit) def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith('.py'): if filename[-4:-1] == '.py': filename = filename[:-1] elif filename.endswith('$py.class'): filename = filename[:-9] + '.py' return filename def _should_trace_with_reason(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a pair of values: the first indicates whether the file should be traced: it's a canonicalized filename if it should be traced, None if it should not. The second value is a string, the resason for the decision. 
""" if not filename: return (None, "empty string isn't a filename") if filename.startswith('<'): return (None, 'not a real filename') self._check_for_packages() dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) if filename.endswith('$py.class'): filename = filename[:-9] + '.py' canonical = self.file_locator.canonical_filename(filename) if self.source_match: if not self.source_match.match(canonical): return (None, 'falls outside the --source trees') elif self.include_match: if not self.include_match.match(canonical): return (None, 'falls outside the --include trees') else: if self.pylib_match and self.pylib_match.match(canonical): return (None, 'is in the stdlib') if self.cover_match and self.cover_match.match(canonical): return (None, 'is part of coverage.py') if self.omit_match and self.omit_match.match(canonical): return (None, 'is inside an --omit pattern') return (canonical, 'because we love you') def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns just the decision. """ canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = 'Not tracing %r: %s' % (filename, reason) else: msg = 'Tracing %r' % (filename, ) self.debug.write(msg) return canonical def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write('Coverage.py warning: %s\n' % msg) def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn('Module %s has no Python source.' % pkg) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self.collector.reset() self.data.read() def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. 
""" if self.run_suffix: self.data_suffix = self.run_suffix if self.auto_data: self.load() if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) if self.debug.should('config'): self.debug.write('Configuration values:') config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write('Debugging info:') self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True def stop(self): """Stop measuring code coverage.""" self._started = False self.collector.stop() def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self.auto_data: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" setattr(self.config, which + '_list', []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + '_list') excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + '_list') self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ return getattr(self.config, which + '_list') def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: extra = '' if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = '.' + test_name data_suffix = '%s%s.%s.%06d' % (socket.gethostname(), extra, os.getpid(), random.randint(0, 999999)) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. 
""" aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn('Module %s was never imported.' % pkg) summary = self.data.summary() if not summary and self._warn_no_data: self._warn('No data was collected.') for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): continue self.data.touch_file(py_file) self._measured = False def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return (f, s, m, mf) def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return (analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted()) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, omit=None, include=None): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. 
The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, 'w') file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = 'unknown' info = [('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([ '%s = %s' % (k, v) for k, v in iitems(os.environ) if re.search('^COV|^PY', k) ])), ('command_line', ' '.join(getattr(sys, 'argv', ['???'])))] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
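
# A small debugging sketch against the legacy lowercase `coverage` class above
# (illustrative only): its sysinfo() method returns (key, value) pairs that
# can simply be printed to inspect how coverage.py was configured.
#
#     cov = coverage(config_file=False)
#     for key, value in cov.sysinfo():
#         print("%s: %s" % (key, value))
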
class Coverage(object): """Programmatic access to coverage.py. To use:: from coverage import coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, debug_file=None, concurrency=None, plugins=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what config file to read. If it is a string, it is the name of the config file to read. If it is True, then a standard file is read (".coveragerc"). If it is False, then no file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of filename patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `debug_file` is the file to write debug messages to, defaulting to stderr. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results. Valid strings are "greenlet", "eventlet", "gevent", or "thread" (the default). `plugins` TODO. """ from coverage import __version__ # A record of all the warnings that have been issued. self._warnings = [] # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() # 2: from the .coveragerc or setup.cfg file: if config_file: did_read_rc = should_read_setupcfg = False if config_file is True: config_file = ".coveragerc" should_read_setupcfg = True try: did_read_rc = self.config.from_file(config_file) except ValueError as err: raise CoverageException( "Couldn't read config file %s: %s" % (config_file, err) ) if not did_read_rc and should_read_setupcfg: self.config.from_file("setup.cfg", section_prefix="coverage:") # 3: from environment variables: self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file # 4: from constructor arguments: self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, concurrency=concurrency, plugins=plugins, ) # Create and configure the debugging controller. 
self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) # Load plugins self.plugins = Plugins.load_plugins(self.config.plugins, self.config) self.trace_judges = [] for plugin in self.plugins: if plugin_implements(plugin, "trace_judge"): self.trace_judges.append(plugin) self.trace_judges.append(None) # The Python case. self.auto_data = auto_data # _exclude_re is a dict mapping exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() # The source argument can be directories or package names. self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) self.collector = Collector( should_trace=self._should_trace, check_include=self._tracing_check_include_omit_etc, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=self.config.concurrency, ) # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData( basename=self.config.data_file, collector="coverage v%s" % __version__, debug=self.debug, ) # The dirs for files considered "installed with the interpreter". self.pylib_dirs = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, os, platform, random, socket, _structseq): if m is not None and hasattr(m, "__file__"): self.pylib_dirs.add(self._canonical_dir(m)) # To avoid tracing the coverage code itself, we skip anything located # where we are. self.cover_dir = self._canonical_dir(__file__) # The matchers for _should_trace. self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Set the reporting precision. Numbers.set_precision(self.config.precision) # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True # State machine variables: # Have we started collecting and not stopped it? self._started = False # Have we measured some data and not harvested it? 
self._measured = False atexit.register(self._atexit) def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" morf_filename = PythonCodeUnit(morf, self.file_locator).filename return os.path.split(morf_filename)[0] def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] elif filename.endswith("$py.class"): # jython filename = filename[:-9] + ".py" return filename def _should_trace_with_reason(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a FileDisposition object. """ disp = FileDisposition(filename) def nope(disp, reason): disp.trace = False disp.reason = reason return disp self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) if not filename: # Empty string is pretty useless return nope(disp, "empty string isn't a filename") if filename.startswith('memory:'): return nope(disp, "memory isn't traceable") if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return nope(disp, "not a real filename") # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) disp.canonical_filename = canonical # Try the plugins, see if they have an opinion about the file. for plugin in self.trace_judges: if plugin: plugin.trace_judge(disp) else: disp.trace = True disp.source_filename = canonical if disp.trace: disp.plugin = plugin if disp.check_filters: reason = self._check_include_omit_etc(disp.source_filename) if reason: nope(disp, reason) return disp return nope(disp, "no plugin found") # TODO: a test that causes this. def _check_include_omit_etc(self, filename): """Check a filename against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason why. None means no reason found to not trace. """ # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: if not self.source_match.match(filename): return "falls outside the --source trees" elif self.include_match: if not self.include_match.match(filename): return "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. 
if self.pylib_match and self.pylib_match.match(filename): return "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(filename): return "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(filename): return "is inside an --omit pattern" # No reason found to skip this file. return None def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns the FileDisposition. """ disp = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): self.debug.write(disp.debug_message()) return disp def _tracing_check_include_omit_etc(self, filename): """Check a filename against the include, omit, etc, rules, and say so. Returns a boolean: True if the file should be traced, False if not. """ reason = self._check_include_omit_etc(filename) if self.debug.should('trace'): if not reason: msg = "Tracing %r" % (filename,) else: msg = "Not tracing %r: %s" % (filename, reason) self.debug.write(msg) return not reason def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): # This is actually a package, return the directory. pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn("Module %s has no Python source." % pkg) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self.collector.reset() self.data.read() def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. """ if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. 
self.data_suffix = self.run_suffix if self.auto_data: self.load() # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. if self.debug.should('config'): self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write("Debugging info:") self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True def stop(self): """Stop measuring code coverage.""" self._started = False self.collector.stop() def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self.auto_data: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. extra = "" if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = "." + test_name data_suffix = "%s%s.%s.%06d" % ( socket.gethostname(), extra, os.getpid(), random.randint(0, 999999) ) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. 
All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._measured: return # TODO: seems like this parallel structure is getting kinda old... self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.data.add_plugin_data(self.collector.get_plugin_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(py_file) self._measured = False # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ def get_plugin(filename): """For code_unit_factory to use to find the plugin for a file.""" plugin = None plugin_name = self.data.plugin_data().get(filename) if plugin_name: plugin = self.plugins.get(plugin_name) return plugin self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator, get_plugin)[0] return Analysis(self, it) def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered. 
""" self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: # Ensure that the output directory is created; done here # because this report pre-opens the output file. # HTMLReport does this using the Report plumbing because # its task is more complex, being multiple files. 
output_dir = os.path.dirname(self.config.xml_output) if output_dir and not os.path.isdir(output_dir): os.makedirs(output_dir) outfile = open(self.config.xml_output, "w") file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod try: implementation = platform.python_implementation() except AttributeError: implementation = "unknown" info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted( ("%s = %s" % (k, v)) for k, v in iitems(os.environ) if k.startswith(("COV", "PY")) )), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
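# A minimal usage sketch of the programmatic API defined above, following the
# flow the class docstring describes: construct, start, run code, stop, then
# report. The measured package name `my_module` and the report destinations
# are hypothetical placeholders, not part of coverage.py itself.
def example_programmatic_run():
    from coverage import Coverage

    cov = Coverage(branch=True, source=["my_module"])  # assumed package under test
    cov.start()
    import my_module                                   # anything imported/run here is measured
    my_module.main()
    cov.stop()
    cov.save()                                         # write the .coverage data file

    total = cov.report(show_missing=True)              # text summary; returns percent covered
    cov.html_report(directory="covhtml")               # HTML report with per-module pages
    cov.xml_report(outfile="coverage.xml")             # Cobertura-compatible XML
    return total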
def test_touch_file_with_arcs(self):
    covdata = CoverageData()
    covdata.set_arcs(ARCS_3)
    covdata.touch_file('zzz.py')
    self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])

class coverage(object): """Programmatic access to Coverage. To use:: from coverage import coverage cov = coverage() cov.start() #.. blah blah (run your code) blah blah .. cov.stop() cov.html_report(directory='covhtml') """ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what config file to read. If it is a string, it is the name of the config file to read. If it is True, then a standard file is read (".coveragerc"). If it is False, then no file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of filename patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. """ from coverage import __version__ # A record of all the warnings that have been issued. self._warnings = [] # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() # 2: from the coveragerc file: if config_file: if config_file is True: config_file = ".coveragerc" try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException("Couldn't read config file %s: %s" % (config_file, err)) # 3: from environment variables: self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file # 4: from constructor arguments: if isinstance(omit, string_class): omit = [omit] if isinstance(include, string_class): include = [include] self.config.from_args(data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include) self.auto_data = auto_data self.atexit_registered = False # _exclude_re is a dict mapping exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() # The source argument can be directories or package names. 
self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = self._prep_patterns(self.config.omit) self.include = self._prep_patterns(self.config.include) self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn) # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData(basename=self.config.data_file, collector="coverage v%s" % __version__) # The dirs for files considered "installed with the interpreter". self.pylib_dirs = [] if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, os, random, socket): if hasattr(m, "__file__"): m_dir = self._canonical_dir(m.__file__) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) # To avoid tracing the coverage code itself, we skip anything located # where we are. self.cover_dir = self._canonical_dir(__file__) # The matchers for _should_trace, created when tracing starts. self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Only _harvest_data once per measurement cycle. self._harvested = False # Set the reporting precision. Numbers.set_precision(self.config.precision) # When tearing down the coverage object, modules can become None. # Saving the modules as object attributes avoids problems, but it is # quite ad-hoc which modules need to be saved and which references # need to use the object attributes. self.socket = socket self.os = os self.random = random def _canonical_dir(self, f): """Return the canonical directory of the file `f`.""" return os.path.split(self.file_locator.canonical_filename(f))[0] def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] return filename def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename` This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a canonicalized filename if it should be traced, False if it should not. """ if os is None: return False if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. 
return False if filename.endswith(".html"): # Jinja and maybe other templating systems compile templates into # Python code, but use the template filename as the filename in # the compiled code. Of course, those filenames are useless later # so don't bother collecting. TODO: How should we really separate # out good file extensions from bad? return False self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) # If the user specified source, then that's authoritative about what to # measure. If they didn't, then we have to exclude the stdlib and # coverage.py directories. if self.source_match: if not self.source_match.match(canonical): return False else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(canonical): return False # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(canonical): return False # Check the file against the include and omit patterns. if self.include_match and not self.include_match.match(canonical): return False if self.omit_match and self.omit_match.match(canonical): return False return canonical # To log what should_trace returns, change this to "if 1:" if 0: _real_should_trace = _should_trace def _should_trace(self, filename, frame): # pylint: disable=E0102 """A logging decorator around the real _should_trace function.""" ret = self._real_should_trace(filename, frame) print("should_trace: %r -> %r" % (filename, ret)) return ret def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def _prep_patterns(self, patterns): """Prepare the file patterns for use in a `FnmatchMatcher`. If a pattern starts with a wildcard, it is used as a pattern as-is. If it does not start with a wildcard, then it is made absolute with the current directory. If `patterns` is None, an empty list is returned. """ patterns = patterns or [] prepped = [] for p in patterns or []: if p.startswith("*") or p.startswith("?"): prepped.append(p) else: prepped.append(self.file_locator.abs_file(p)) return prepped def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: self._warn("Module %s has no Python source." 
% pkg) else: d, f = os.path.split(pkg_file) if f.startswith('__init__.'): # This is actually a package, return the directory. pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) self.source.append(pkg_file) self.source_match.add(pkg_file) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self.collector.reset() self.data.read() def start(self): """Start measuring code coverage.""" if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self.auto_data: self.load() # Save coverage data when Python exits. if not self.atexit_registered: atexit.register(self.save) self.atexit_registered = True # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) self._harvested = False self.collector.start() def stop(self): """Stop measuring code coverage.""" self.collector.stop() self._harvest_data() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. 
We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. data_suffix = "%s.%s.%06d" % (self.socket.gethostname(), self.os.getpid(), self.random.randint(0, 99999)) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ self.data.combine_parallel_data() def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._harvested: self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): self.data.touch_file(py_file) self._harvested = True # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return (analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted()) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report( self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. """ self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = SummaryReporter(self, show_missing, self.config.ignore_errors) reporter.report(morfs, outfile=file, config=self.config) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. 
""" self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = AnnotateReporter(self, self.config.ignore_errors) reporter.report(morfs, config=self.config, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Generate an HTML report. See `coverage.report()` for other arguments. """ self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, ) reporter = HtmlReporter(self, self.config.ignore_errors) reporter.report(morfs, config=self.config) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. """ self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: reporter = XmlReporter(self, self.config.ignore_errors) reporter.report(morfs, outfile=outfile, config=self.config) finally: if file_to_close: file_to_close.close() def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('cwd', os.getcwd()), ('path', sys.path), ('environment', [("%s = %s" % (k, v)) for k, v in os.environ.items() if re.search("^COV|^PY", k)]), ] return info
def test_empty_arcs_are_still_arcs(self):
    covdata = CoverageData()
    covdata.add_arcs({})
    covdata.touch_file("abc.py")
    self.assertTrue(covdata.has_arcs())
class coverage(object): def __init__(self, data_file = None, data_suffix = None, cover_pylib = None, auto_data = False, timid = None, branch = None, config_file = True, source = None, omit = None, include = None, debug = None, debug_file = None): from coverage import __version__ self._warnings = [] self.config = CoverageConfig() if config_file: if config_file is True: config_file = '.coveragerc' try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException("Couldn't read config file %s: %s" % (config_file, err)) self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file self.config.from_args(data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug) self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) self.auto_data = auto_data self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn) if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix self.data = CoverageData(basename=self.config.data_file, collector='coverage v%s' % __version__, debug=self.debug) self.pylib_dirs = [] if not self.config.cover_pylib: for m in (atexit, os, random, socket, _structseq): if m is not None and hasattr(m, '__file__'): m_dir = self._canonical_dir(m) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) self.cover_dir = self._canonical_dir(__file__) self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None Numbers.set_precision(self.config.precision) self._warn_no_data = True self._warn_unimported_source = True self._started = False self._measured = False atexit.register(self._atexit) def _canonical_dir(self, morf): return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] def _source_for_file(self, filename): if not filename.endswith('.py'): if filename[-4:-1] == '.py': filename = filename[:-1] elif filename.endswith('$py.class'): filename = filename[:-9] + '.py' return filename def _should_trace_with_reason(self, filename, frame): if not filename: return (None, "empty string isn't a filename") if filename.startswith('<'): return (None, 'not a real filename') self._check_for_packages() dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) if filename.endswith('$py.class'): filename = filename[:-9] + '.py' canonical = self.file_locator.canonical_filename(filename) if self.source_match: if not self.source_match.match(canonical): return (None, 'falls outside the --source trees') elif self.include_match: if not self.include_match.match(canonical): return (None, 'falls outside the --include trees') else: if self.pylib_match and self.pylib_match.match(canonical): return (None, 'is in the stdlib') if self.cover_match and self.cover_match.match(canonical): return (None, 'is part of 
coverage.py') if self.omit_match and self.omit_match.match(canonical): return (None, 'is inside an --omit pattern') return (canonical, 'because we love you') def _should_trace(self, filename, frame): canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = 'Not tracing %r: %s' % (filename, reason) else: msg = 'Tracing %r' % (filename,) self.debug.write(msg) return canonical def _warn(self, msg): self._warnings.append(msg) sys.stderr.write('Coverage.py warning: %s\n' % msg) def _check_for_packages(self): if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn('Module %s has no Python source.' % pkg) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): self.data.usefile(usecache) def load(self): self.collector.reset() self.data.read() def start(self): if self.run_suffix: self.data_suffix = self.run_suffix if self.auto_data: self.load() if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) if self.debug.should('config'): self.debug.write('Configuration values:') config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write('Debugging info:') self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True def stop(self): self._started = False self.collector.stop() def _atexit(self): if self._started: self.stop() if self.auto_data: self.save() def erase(self): self.collector.reset() self.data.erase() def clear_exclude(self, which = 'exclude'): setattr(self.config, which + '_list', []) self._exclude_regex_stale() def exclude(self, regex, which = 'exclude'): excl_list = getattr(self.config, which + '_list') excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): self._exclude_re.clear() def _exclude_regex(self, which): if which not in self._exclude_re: excl_list = getattr(self.config, which + '_list') self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which = 'exclude'): return getattr(self.config, which + '_list') def save(self): data_suffix = self.data_suffix if data_suffix is True: extra = '' if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = '.' 
+ test_name data_suffix = '%s%s.%s.%06d' % (socket.gethostname(), extra, os.getpid(), random.randint(0, 999999)) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn('Module %s was never imported.' % pkg) summary = self.data.summary() if not summary and self._warn_no_data: self._warn('No data was collected.') for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): continue self.data.touch_file(py_file) self._measured = False def analysis(self, morf): f, s, _, m, mf = self.analysis2(morf) return (f, s, m, mf) def analysis2(self, morf): analysis = self._analyze(morf) return (analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted()) def _analyze(self, it): self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report(self, morfs = None, show_missing = True, ignore_errors = None, file = None, omit = None, include = None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate(self, morfs = None, directory = None, ignore_errors = None, omit = None, include = None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs = None, directory = None, ignore_errors = None, omit = None, include = None, extra_css = None, title = None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report(self, morfs = None, outfile = None, ignore_errors = None, omit = None, include = None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, 'w') file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sysinfo(self): import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = 'unknown' info = [('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), 
('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([ '%s = %s' % (k, v) for k, v in iitems(os.environ) if re.search('^COV|^PY', k) ])), ('command_line', ' '.join(getattr(sys, 'argv', ['???'])))] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
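# Sketch of driving the exclusion-list methods that appear in the class above
# (exclude / get_exclude_list / clear_exclude). The regexes are examples; as
# the earlier docstrings explain, the "exclude" list marks lines that are not
# considered executable at all, while the "partial" list marks branch lines
# whose untaken branch should not be flagged.
def example_exclusions(cov):
    cov.exclude(r"#\s*pragma: no cover")            # add to the "exclude" list
    cov.exclude(r"if DEBUG:", which="partial")      # add to the "partial" branch list
    print(cov.get_exclude_list())                   # current "exclude" regexes
    cov.clear_exclude()                             # reset the "exclude" list to empty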
class coverage(object): def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, debug_file=None): from coverage import __version__ self._warnings = [] self.config = CoverageConfig() if config_file: if config_file is True: config_file = '.coveragerc' try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException("Couldn't read config file %s: %s" % (config_file, err)) self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file self.config.from_args(data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug) self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) self.auto_data = auto_data self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn) if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix self.data = CoverageData(basename=self.config.data_file, collector='coverage v%s' % __version__, debug=self.debug) self.pylib_dirs = [] if not self.config.cover_pylib: for m in (atexit, os, random, socket, _structseq): if m is not None and hasattr(m, '__file__'): m_dir = self._canonical_dir(m) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) self.cover_dir = self._canonical_dir(__file__) self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None Numbers.set_precision(self.config.precision) self._warn_no_data = True self._warn_unimported_source = True self._started = False self._measured = False atexit.register(self._atexit) def _canonical_dir(self, morf): return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] def _source_for_file(self, filename): if not filename.endswith('.py'): if filename[-4:-1] == '.py': filename = filename[:-1] elif filename.endswith('$py.class'): filename = filename[:-9] + '.py' return filename def _should_trace_with_reason(self, filename, frame): if not filename: return (None, "empty string isn't a filename") if filename.startswith('<'): return (None, 'not a real filename') self._check_for_packages() dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) if filename.endswith('$py.class'): filename = filename[:-9] + '.py' canonical = self.file_locator.canonical_filename(filename) if self.source_match: if not self.source_match.match(canonical): return (None, 'falls outside the --source trees') elif self.include_match: if not self.include_match.match(canonical): return (None, 'falls outside the --include trees') else: if self.pylib_match and self.pylib_match.match(canonical): return (None, 'is in the stdlib') if self.cover_match and self.cover_match.match(canonical): return (None, 'is part of coverage.py') if 
self.omit_match and self.omit_match.match(canonical): return (None, 'is inside an --omit pattern') return (canonical, 'because we love you') def _should_trace(self, filename, frame): canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = 'Not tracing %r: %s' % (filename, reason) else: msg = 'Tracing %r' % (filename, ) self.debug.write(msg) return canonical def _warn(self, msg): self._warnings.append(msg) sys.stderr.write('Coverage.py warning: %s\n' % msg) def _check_for_packages(self): if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn('Module %s has no Python source.' % pkg) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): self.data.usefile(usecache) def load(self): self.collector.reset() self.data.read() def start(self): if self.run_suffix: self.data_suffix = self.run_suffix if self.auto_data: self.load() if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) if self.debug.should('config'): self.debug.write('Configuration values:') config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write('Debugging info:') self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True def stop(self): self._started = False self.collector.stop() def _atexit(self): if self._started: self.stop() if self.auto_data: self.save() def erase(self): self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): setattr(self.config, which + '_list', []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): excl_list = getattr(self.config, which + '_list') excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): self._exclude_re.clear() def _exclude_regex(self, which): if which not in self._exclude_re: excl_list = getattr(self.config, which + '_list') self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): return getattr(self.config, which + '_list') def save(self): data_suffix = self.data_suffix if data_suffix is True: extra = '' if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = '.' 
+ test_name data_suffix = '%s%s.%s.%06d' % (socket.gethostname(), extra, os.getpid(), random.randint(0, 999999)) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn('Module %s was never imported.' % pkg) summary = self.data.summary() if not summary and self._warn_no_data: self._warn('No data was collected.') for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): continue self.data.touch_file(py_file) self._measured = False def analysis(self, morf): f, s, _, m, mf = self.analysis2(morf) return (f, s, m, mf) def analysis2(self, morf): analysis = self._analyze(morf) return (analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted()) def _analyze(self, it): self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, omit=None, include=None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, 'w') file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sysinfo(self): import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = 'unknown' info = [('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), 
('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([ '%s = %s' % (k, v) for k, v in iitems(os.environ) if re.search('^COV|^PY', k) ])), ('command_line', ' '.join(getattr(sys, 'argv', ['???'])))] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
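# Sketch of the parallel-measurement workflow implemented by save() and
# combine() above: with data_suffix=True each process writes a data file named
# ".coverage.<hostname>.<pid>.<random>", and combine() merges every such file
# into the current data set, remapping paths with the [paths] aliases from the
# configuration (the first entry in each list is the canonical result, the
# remaining entries are patterns). The config fragment and file layout are
# assumptions for illustration.
#
# Assumed .coveragerc fragment:
#   [paths]
#   source =
#       src/mypkg
#       /ci/build/*/src/mypkg
def example_parallel_combine():
    from coverage import coverage

    worker = coverage(data_suffix=True)   # each process gets its own suffixed data file
    worker.start()
    # ... run this worker's share of the code ...
    worker.stop()
    worker.save()

    merged = coverage()                   # a fresh object on the same base data_file
    merged.combine()                      # fold all ".coverage.*" files into one data set
    merged.save()                         # write the combined ".coverage" file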
def test_empty_lines_are_still_lines(self):
    covdata = CoverageData()
    covdata.add_lines({})
    covdata.touch_file("abc.py")
    assert not covdata.has_arcs()
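# The tests test_empty_arcs_are_still_arcs and test_empty_lines_are_still_lines
# pin down a subtle contract: whether a CoverageData object reports has_arcs()
# depends on which kind of data was added, not on whether anything was actually
# recorded. A compact sketch of that contract (the import path is assumed to
# match what these tests use):
def example_empty_data_still_sets_kind():
    from coverage.data import CoverageData

    arc_data = CoverageData()
    arc_data.add_arcs({})             # arc (branch) measurement, even if empty
    arc_data.touch_file("abc.py")
    assert arc_data.has_arcs()        # data is arc-flavored

    line_data = CoverageData()
    line_data.add_lines({})           # line measurement, even if empty
    line_data.touch_file("abc.py")
    assert not line_data.has_arcs()   # data is line-flavored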
def test_no_lines_vs_unmeasured_file(self):
    covdata = CoverageData()
    covdata.add_lines(LINES_1)
    covdata.touch_file('zzz.py')
    assert covdata.lines('zzz.py') == []
    assert covdata.lines('no_such_file.py') is None
def test_touch_file_with_arcs(self):
    covdata = CoverageData()
    covdata.add_arcs(ARCS_3)
    covdata.touch_file('zzz.py')
    self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])
def test_touch_file(self):
    covdata = CoverageData()
    covdata.add_line_data(DATA_1)
    covdata.touch_file('x.py')
    self.assert_measured_files(covdata, MEASURED_FILES_1 + ['x.py'])
def test_no_lines_vs_unmeasured_file(self):
    covdata = CoverageData()
    covdata.add_lines(LINES_1)
    covdata.touch_file('zzz.py')
    self.assertEqual(covdata.lines('zzz.py'), [])
    self.assertIsNone(covdata.lines('no_such_file.py'))
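# As the test above shows, touch_file() marks a file as measured even though no
# lines were recorded for it, which is distinct from a file coverage.py never
# saw at all. A small sketch of that distinction; the literal line data stands
# in for the LINES_1 fixture and its exact shape is an assumption:
def example_touched_vs_unmeasured():
    from coverage.data import CoverageData   # import path assumed from the test module

    covdata = CoverageData()
    covdata.add_lines({"a.py": {1: None, 2: None}})   # minimal line data
    covdata.touch_file("zzz.py")

    assert covdata.lines("zzz.py") == []              # measured, nothing recorded
    assert covdata.lines("no_such_file.py") is None   # never measured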
class Coverage(object): """Programmatic access to coverage.py. To use:: from coverage import Coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__( self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, concurrency=None, ): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what configuration file to read: * If it is ".coveragerc", it is interpreted as if it were True, for backward compatibility. * If it is a string, it is the name of the file to read. If the file can't be read, it is an error. * If it is True, then a few standard files names are tried (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for these files to not be found. * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results if these libraries are in use. Valid strings are "greenlet", "eventlet", "gevent", "multiprocessing", or "thread" (the default). This can also be a list of these strings. .. versionadded:: 4.0 The `concurrency` parameter. .. versionadded:: 4.2 The `concurrency` parameter can now be a list of strings. """ # Build our configuration from a number of sources. self.config_file, self.config = read_coverage_config( config_file=config_file, data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, run_omit=omit, run_include=include, debug=debug, report_omit=omit, report_include=include, concurrency=concurrency, ) # This is injectable by tests. self._debug_file = None self._auto_load = self._auto_save = auto_data self._data_suffix = data_suffix # The matchers for _should_trace. self.source_match = None self.source_pkgs_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True # A record of all the warnings that have been issued. self._warnings = [] # Other instance attributes, set later. 
self.omit = self.include = self.source = None self.source_pkgs_unmatched = None self.source_pkgs = None self.data = self.data_files = self.collector = None self.plugins = None self.pylib_paths = self.cover_paths = None self.data_suffix = self.run_suffix = None self._exclude_re = None self.debug = None # State machine variables: # Have we initialized everything? self._inited = False # Have we started collecting and not stopped it? self._started = False # If we have sub-process measurement happening automatically, then we # want any explicit creation of a Coverage object to mean, this process # is already coverage-aware, so don't auto-measure it. By now, the # auto-creation of a Coverage object has already happened. But we can # find it and tell it not to save its data. if not env.METACOV: _prevent_sub_process_measurement() def _init(self): """Set all the initial state. This is called by the public methods to initialize state. This lets us construct a :class:`Coverage` object, then tweak its state before this function is called. """ if self._inited: return self._inited = True # Create and configure the debugging controller. COVERAGE_DEBUG_FILE # is an environment variable, the name of a file to append debug logs # to. if self._debug_file is None: debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE") if debug_file_name: self._debug_file = open(debug_file_name, "a") else: self._debug_file = sys.stderr self.debug = DebugControl(self.config.debug, self._debug_file) # Load plugins self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug) # _exclude_re is a dict that maps exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() set_relative_directory() # The source argument can be directories or package names. self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.isdir(src): self.source.append(canonical_filename(src)) else: self.source_pkgs.append(src) self.source_pkgs_unmatched = self.source_pkgs[:] self.omit = prep_patterns(self.config.run_omit) self.include = prep_patterns(self.config.run_include) concurrency = self.config.concurrency or [] if "multiprocessing" in concurrency: if not patch_multiprocessing: raise CoverageException( # pragma: only jython "multiprocessing is not supported on this Python") patch_multiprocessing(rcfile=self.config_file) # Multi-processing uses parallel for the subprocesses, so also use # it for the main process. self.config.parallel = True self.collector = Collector( should_trace=self._should_trace, check_include=self._check_include_omit_etc, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=concurrency, ) # Early warning if we aren't going to be able to support plugins. if self.plugins.file_tracers and not self.collector.supports_plugins: self._warn("Plugin file tracers (%s) aren't supported with %s" % ( ", ".join(plugin._coverage_plugin_name for plugin in self.plugins.file_tracers), self.collector.tracer_name(), )) for plugin in self.plugins.file_tracers: plugin._coverage_enabled = False # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. 
if self._data_suffix or self.config.parallel: if not isinstance(self._data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random self._data_suffix = True else: self._data_suffix = None self.data_suffix = None self.run_suffix = self._data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData(debug=self.debug) self.data_files = CoverageDataFiles( basename=self.config.data_file, warn=self._warn, debug=self.debug, ) # The directories for files considered "installed with the interpreter". self.pylib_paths = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback): if m is not None and hasattr(m, "__file__"): self.pylib_paths.add( self._canonical_path(m, directory=True)) if _structseq and not hasattr(_structseq, '__file__'): # PyPy 2.4 has no __file__ in the builtin modules, but the code # objects still have the file names. So dig into one to find # the path to exclude. structseq_new = _structseq.structseq_new try: structseq_file = structseq_new.func_code.co_filename except AttributeError: structseq_file = structseq_new.__code__.co_filename self.pylib_paths.add(self._canonical_path(structseq_file)) # To avoid tracing the coverage.py code itself, we skip anything # located where we are. self.cover_paths = [self._canonical_path(__file__, directory=True)] if env.TESTING: # Don't include our own test code. self.cover_paths.append(os.path.join(self.cover_paths[0], "tests")) # When testing, we use PyContracts, which should be considered # part of coverage.py, and it uses six. Exclude those directories # just as we exclude ourselves. import contracts import six for mod in [contracts, six]: self.cover_paths.append(self._canonical_path(mod)) # Set the reporting precision. Numbers.set_precision(self.config.precision) atexit.register(self._atexit) # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) self.source_pkgs_match = ModuleMatcher(self.source_pkgs) else: if self.cover_paths: self.cover_match = TreeMatcher(self.cover_paths) if self.pylib_paths: self.pylib_match = TreeMatcher(self.pylib_paths) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. 
self._write_startup_debug() def _write_startup_debug(self): """Write out debug info at startup if needed.""" wrote_any = False with self.debug.without_callers(): if self.debug.should('config'): config_info = sorted(self.config.__dict__.items()) write_formatted_info(self.debug, "config", config_info) wrote_any = True if self.debug.should('sys'): write_formatted_info(self.debug, "sys", self.sys_info()) for plugin in self.plugins: header = "sys: " + plugin._coverage_plugin_name info = plugin.sys_info() write_formatted_info(self.debug, header, info) wrote_any = True if wrote_any: write_formatted_info(self.debug, "end", ()) def _canonical_path(self, morf, directory=False): """Return the canonical path of the module or file `morf`. If the module is a package, then return its directory. If it is a module, then return its file, unless `directory` is True, in which case return its enclosing directory. """ morf_path = PythonFileReporter(morf, self).filename if morf_path.endswith("__init__.py") or directory: morf_path = os.path.split(morf_path)[0] return morf_path def _name_for_module(self, module_globals, filename): """Get the name of the module for a set of globals and file name. For configurability's sake, we allow __main__ modules to be matched by their importable name. If loaded via runpy (aka -m), we can usually recover the "original" full dotted module name, otherwise, we resort to interpreting the file name to get the module's name. In the case that the module name can't be determined, None is returned. """ if module_globals is None: # pragma: only ironpython # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296 module_globals = {} dunder_name = module_globals.get('__name__', None) if isinstance(dunder_name, str) and dunder_name != '__main__': # This is the usual case: an imported module. return dunder_name loader = module_globals.get('__loader__', None) for attrname in ('fullname', 'name'): # attribute renamed in py3.2 if hasattr(loader, attrname): fullname = getattr(loader, attrname) else: continue if isinstance(fullname, str) and fullname != '__main__': # Module loaded via: runpy -m return fullname # Script as first argument to Python command line. inspectedname = inspect.getmodulename(filename) if inspectedname is not None: return inspectedname else: return dunder_name def _should_trace_internal(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a FileDisposition object. """ original_filename = filename disp = _disposition_init(self.collector.file_disposition_class, filename) def nope(disp, reason): """Simple helper to make it easy to return NO.""" disp.trace = False disp.reason = reason return disp # Compiled Python files have two file names: frame.f_code.co_filename is # the file name at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals and frame.f_globals.get('__file__') if dunder_file: filename = source_for_file(dunder_file) if original_filename and not original_filename.startswith('<'): orig = os.path.basename(original_filename) if orig != os.path.basename(filename): # Files shouldn't be renamed when moved. 
This happens when # exec'ing code. If it seems like something is wrong with # the frame's file name, then just use the original. filename = original_filename if not filename: # Empty string is pretty useless. return nope(disp, "empty string isn't a file name") if filename.startswith('memory:'): return nope(disp, "memory isn't traceable") if filename.startswith('<'): # Lots of non-file execution is represented with artificial # file names like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return nope(disp, "not a real file name") # pyexpat does a dumb thing, calling the trace function explicitly from # C code with a C file name. if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename): return nope(disp, "pyexpat lies about itself") # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = canonical_filename(filename) disp.canonical_filename = canonical # Try the plugins, see if they have an opinion about the file. plugin = None for plugin in self.plugins.file_tracers: if not plugin._coverage_enabled: continue try: file_tracer = plugin.file_tracer(canonical) if file_tracer is not None: file_tracer._coverage_plugin = plugin disp.trace = True disp.file_tracer = file_tracer if file_tracer.has_dynamic_source_filename(): disp.has_dynamic_filename = True else: disp.source_filename = canonical_filename( file_tracer.source_filename()) break except Exception: self._warn("Disabling plugin %r due to an exception:" % (plugin._coverage_plugin_name)) traceback.print_exc() plugin._coverage_enabled = False continue else: # No plugin wanted it: it's Python. disp.trace = True disp.source_filename = canonical if not disp.has_dynamic_filename: if not disp.source_filename: raise CoverageException( "Plugin %r didn't set source_filename for %r" % (plugin, disp.original_filename)) reason = self._check_include_omit_etc_internal( disp.source_filename, frame, ) if reason: nope(disp, reason) return disp def _check_include_omit_etc_internal(self, filename, frame): """Check a file name against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason why. None means no reason found to not trace. """ modulename = self._name_for_module(frame.f_globals, filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: if self.source_pkgs_match.match(modulename): if modulename in self.source_pkgs_unmatched: self.source_pkgs_unmatched.remove(modulename) return None # There's no reason to skip this file. if not self.source_match.match(filename): return "falls outside the --source trees" elif self.include_match: if not self.include_match.match(filename): return "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(filename): return "is in the stdlib" # We exclude the coverage.py code itself, since a little of it # will be measured otherwise. if self.cover_match and self.cover_match.match(filename): return "is part of coverage.py" # Check the file against the omit pattern. 
if self.omit_match and self.omit_match.match(filename): return "is inside an --omit pattern" # No reason found to skip this file. return None def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. """ disp = self._should_trace_internal(filename, frame) if self.debug.should('trace'): self.debug.write(_disposition_debug_msg(disp)) return disp def _check_include_omit_etc(self, filename, frame): """Check a file name against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. """ reason = self._check_include_omit_etc_internal(filename, frame) if self.debug.should('trace'): if not reason: msg = "Including %r" % (filename, ) else: msg = "Not including %r: %s" % (filename, reason) self.debug.write(msg) return not reason def _warn(self, msg, slug=None): """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. """ if slug in self.config.disable_warnings: # Don't issue the warning return self._warnings.append(msg) if slug: msg = "%s (%s)" % (msg, slug) if self.debug.should('pid'): msg = "[%d] %s" % (os.getpid(), msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def get_option(self, option_name): """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. .. versionadded:: 4.0 """ return self.config.get_option(option_name) def set_option(self, option_name, value): """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with ``"run:branch"``. `value` is the new value for the option. This should be a Python value where appropriate. For example, use True for booleans, not the string ``"True"``. As an example, calling:: cov.set_option("run:branch", True) has the same effect as this configuration file:: [run] branch = True .. versionadded:: 4.0 """ self.config.set_option(option_name, value) def use_cache(self, usecache): """Obsolete method.""" self._init() if not usecache: self._warn("use_cache(False) is no longer supported.") def load(self): """Load previously-collected coverage data from the data file.""" self._init() self.collector.reset() self.data_files.read(self.data) def start(self): """Start measuring code coverage. Coverage measurement only occurs in functions called after :meth:`start` is invoked. Statements in the same scope as :meth:`start` won't be measured. Once you invoke :meth:`start`, you must also call :meth:`stop` eventually, or your process might not shut down cleanly. """ self._init() if self.include: if self.source or self.source_pkgs: self._warn("--include is ignored because --source is set", slug="include-ignored") if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. 
self.data_suffix = self.run_suffix if self._auto_load: self.load() self.collector.start() self._started = True def stop(self): """Stop measuring code coverage.""" if self._started: self.collector.stop() self._started = False def _atexit(self): """Clean up on process shutdown.""" if self.debug.should("process"): self.debug.write("atexit: {0!r}".format(self)) if self._started: self.stop() if self._auto_save: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self._init() self.collector.reset() self.data.erase() self.data_files.erase(parallel=self.config.parallel) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" self._init() self.get_data() self.data_files.write(self.data, suffix=self.data_suffix) def combine(self, data_paths=None, strict=False): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. `data_paths` is a list of files or directories from which data should be combined. If no list is passed, then the data files from the directory indicated by the current data file (probably the current directory) will be combined. If `strict` is true, then it is an error to attempt to combine when there are no data files to combine. .. versionadded:: 4.0 The `data_paths` parameter. .. versionadded:: 4.3 The `strict` parameter. """ self._init() self.get_data() aliases = None if self.config.paths: aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data_files.combine_parallel_data( self.data, aliases=aliases, data_paths=data_paths, strict=strict, ) def get_data(self): """Get the collected data. Also warn about various problems collecting data. 
Returns a :class:`coverage.CoverageData`, the collected coverage data. .. versionadded:: 4.0 """ self._init() if self.collector.save_data(self.data): self._post_save_work() return self.data def _post_save_work(self): """After saving data, look for warnings, post-work, etc. Warn about things that should have happened but didn't. Look for unexecuted files. """ # If there are still entries in the source_pkgs_unmatched list, # then we never encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs_unmatched: self._warn_about_unmeasured_code(pkg) # Find out if we got any data. if not self.data and self._warn_no_data: self._warn("No data was collected.", slug="no-data-collected") # Find files that were never executed at all. for pkg in self.source_pkgs: if (not pkg in sys.modules or not hasattr(sys.modules[pkg], '__file__') or not os.path.exists(sys.modules[pkg].__file__)): continue pkg_file = source_for_file(sys.modules[pkg].__file__) self._find_unexecuted_files(self._canonical_path(pkg_file)) for src in self.source: self._find_unexecuted_files(src) if self.config.note: self.data.add_run_info(note=self.config.note) def _warn_about_unmeasured_code(self, pkg): """Warn about a package or module that we never traced. `pkg` is a string, the name of the package or module. """ mod = sys.modules.get(pkg) if mod is None: self._warn("Module %s was never imported." % pkg, slug="module-not-imported") return is_namespace = hasattr(mod, '__path__') and not hasattr(mod, '__file__') has_file = hasattr(mod, '__file__') and os.path.exists(mod.__file__) if is_namespace: # A namespace package. It's OK for this not to have been traced, # since there is no code directly in it. return if not has_file: self._warn("Module %s has no Python source." % pkg, slug="module-not-python") return # The module was in sys.modules, and seems like a module with code, but # we never measured it. I guess that means it was imported before # coverage even started. self._warn( "Module %s was previously imported, but not measured" % pkg, slug="module-not-measured", ) def _find_plugin_files(self, src_dir): """Get executable files from the plugins.""" for plugin in self.plugins: for x_file in plugin.find_executable_files(src_dir): yield x_file, plugin._coverage_plugin_name def _find_unexecuted_files(self, src_dir): """Find unexecuted files in `src_dir`. Search for files in `src_dir` that are probably importable, and add them as unexecuted files in `self.data`. """ py_files = ((py_file, None) for py_file in find_python_files(src_dir)) plugin_files = self._find_plugin_files(src_dir) for file_path, plugin_name in itertools.chain(py_files, plugin_files): file_path = canonical_filename(file_path) if self.omit_match and self.omit_match.match(file_path): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(file_path, plugin_name) # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. 
The analysis uses the source file itself and the current measured coverage data. """ self._init() analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self.get_data() if not isinstance(it, FileReporter): it = self._get_file_reporter(it) return Analysis(self.data, it) def _get_file_reporter(self, morf): """Get a FileReporter for a module or file name.""" plugin = None file_reporter = "python" if isinstance(morf, string_class): abs_morf = abs_file(morf) plugin_name = self.data.file_tracer(abs_morf) if plugin_name: plugin = self.plugins.get(plugin_name) if plugin: file_reporter = plugin.file_reporter(abs_morf) if file_reporter is None: raise CoverageException( "Plugin %r did not provide a file reporter for %r." % (plugin._coverage_plugin_name, morf)) if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) return file_reporter def _get_file_reporters(self, morfs=None): """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return the list of FileReporters. If `morfs` is a single module or file name, this returns a list of one FileReporter. If `morfs` is empty or None, then the list of all files measured is used to find the FileReporters. """ if not morfs: morfs = self.data.measured_files() # Be sure we have a list. if not isinstance(morfs, (list, tuple)): morfs = [morfs] file_reporters = [] for morf in morfs: file_reporter = self._get_file_reporter(morf) file_reporters.append(file_reporter) return file_reporters def report( self, morfs=None, show_missing=None, ignore_errors=None, file=None, # pylint: disable=redefined-builtin omit=None, include=None, skip_covered=None, ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. If `skip_covered` is True, don't report on files with 100% coverage. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate( self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See :meth:`report` for other arguments. """ self.get_data() self.config.from_args(ignore_errors=ignore_errors, report_omit=omit, report_include=include) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None, skip_covered=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. 
`extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, html_dir=directory, extra_css=extra_css, html_title=title, skip_covered=skip_covered, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report( self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: # Ensure that the output directory is created; done here # because this report pre-opens the output file. # HTMLReport does this using the Report plumbing because # its task is more complex, being multiple files. output_dir = os.path.dirname(self.config.xml_output) if output_dir and not os.path.isdir(output_dir): os.makedirs(output_dir) open_kwargs = {} if env.PY3: open_kwargs['encoding'] = 'utf8' outfile = open(self.config.xml_output, "w", **open_kwargs) file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod self._init() ft_plugins = [] for ft in self.plugins.file_tracers: ft_name = ft._coverage_plugin_name if not ft._coverage_enabled: ft_name += " (disabled)" ft_plugins.append(ft_name) info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_paths', self.cover_paths), ('pylib_paths', self.pylib_paths), ('tracer', self.collector.tracer_name()), ('plugins.file_tracers', ft_plugins), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data_files.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', platform.python_implementation()), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted(("%s = %s" % (k, v)) for k, v in iitems(os.environ) if k.startswith(("COV", "PY")))), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] matcher_names = [ 'source_match', 'source_pkgs_match', 'include_match', 'omit_match', 'cover_match', 'pylib_match', ] for matcher_name in matcher_names: matcher = getattr(self, matcher_name) if matcher: matcher_info = matcher.info() else: matcher_info = '-none-' info.append((matcher_name, matcher_info)) return info
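# Hedged usage sketch for the Coverage class above: measure a callable, save
# the data, and produce the reports its methods document. The measured
# function and the output paths are placeholders, not part of coverage.py.
from coverage import Coverage

def measure_and_report(func):
    cov = Coverage()
    cov.set_option("run:branch", True)        # same effect as "[run] branch = True" in a config file
    cov.start()
    try:
        func()                                # .. call your code ..
    finally:
        cov.stop()
        cov.save()
    total = cov.report(show_missing=True)     # summary table, returns percent covered
    cov.html_report(directory='covhtml')      # per-file HTML pages under covhtml/
    cov.xml_report(outfile='coverage.xml')    # Cobertura-compatible XML
    return total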
def test_touch_file_with_lines(self):
    covdata = CoverageData()
    covdata.set_lines(LINES_1)
    covdata.touch_file('zzz.py')
    self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])
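# Rough companion sketch to the test above: after touch_file(), the new file
# appears in measured_files() even though it has no line data. Whether the
# writer method is set_lines() or add_lines() depends on the CoverageData
# version; the data literal below is an assumed stand-in for LINES_1.
from coverage import CoverageData

def demo_touch_file_measured():
    covdata = CoverageData()
    covdata.add_lines({'a.py': {1: None}, 'b.py': {5: None}})
    before = set(covdata.measured_files())
    covdata.touch_file('zzz.py')
    after = set(covdata.measured_files())
    assert after == before | {'zzz.py'}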
class coverage(object): """Programmatic access to coverage.py. To use:: from coverage import coverage cov = coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, debug_file=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what config file to read. If it is a string, it is the name of the config file to read. If it is True, then a standard file is read (".coveragerc"). If it is False, then no file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of filename patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `debug_file` is the file to write debug messages to, defaulting to stderr. """ from coverage import __version__ # A record of all the warnings that have been issued. self._warnings = [] # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() # 2: from the coveragerc file: if config_file: if config_file is True: config_file = ".coveragerc" try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException("Couldn't read config file %s: %s" % (config_file, err)) # 3: from environment variables: self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file # 4: from constructor arguments: self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, ) # Create and configure the debugging controller. self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) self.auto_data = auto_data # _exclude_re is a dict mapping exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() # The source argument can be directories or package names. 
self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) self.collector = Collector(self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn) # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData( basename=self.config.data_file, collector="coverage v%s" % __version__, debug=self.debug, ) # The dirs for files considered "installed with the interpreter". self.pylib_dirs = [] if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, os, random, socket, _structseq): if m is not None and hasattr(m, "__file__"): m_dir = self._canonical_dir(m) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) # To avoid tracing the coverage code itself, we skip anything located # where we are. self.cover_dir = self._canonical_dir(__file__) # The matchers for _should_trace. self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Set the reporting precision. Numbers.set_precision(self.config.precision) # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True # State machine variables: # Have we started collecting and not stopped it? self._started = False # Have we measured some data and not harvested it? self._measured = False atexit.register(self._atexit) def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] elif filename.endswith("$py.class"): # jython filename = filename[:-9] + ".py" return filename def _should_trace_with_reason(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a pair of values: the first indicates whether the file should be traced: it's a canonicalized filename if it should be traced, None if it should not. The second value is a string, the resason for the decision. 
""" if not filename: # Empty string is pretty useless return None, "empty string isn't a filename" if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return None, "not a real filename" self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: if not self.source_match.match(canonical): return None, "falls outside the --source trees" elif self.include_match: if not self.include_match.match(canonical): return None, "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(canonical): return None, "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(canonical): return None, "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(canonical): return None, "is inside an --omit pattern" return canonical, "because we love you" def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns just the decision. """ canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = "Not tracing %r: %s" % (filename, reason) else: msg = "Tracing %r" % (filename, ) self.debug.write(msg) return canonical def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): # This is actually a package, return the directory. 
pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn("Module %s has no Python source." % pkg) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self.collector.reset() self.data.read() def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. """ if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self.auto_data: self.load() # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. if self.debug.should('config'): self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write("Debugging info:") self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True def stop(self): """Stop measuring code coverage.""" self._started = False self.collector.stop() def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self.auto_data: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. 
""" excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. extra = "" if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = "." + test_name data_suffix = "%s%s.%s.%06d" % (socket.gethostname(), extra, os.getpid(), random.randint(0, 999999)) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases) def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(py_file) self._measured = False # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. 
""" analysis = self._analyze(morf) return (analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted()) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report( self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args(ignore_errors=ignore_errors, omit=omit, include=include) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. 
""" self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = "unknown" info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([("%s = %s" % (k, v)) for k, v in iitems(os.environ) if re.search(r"^COV|^PY", k)])), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
def test_no_lines_vs_unmeasured_file(self):
    covdata = CoverageData()
    covdata.set_lines(LINES_1)
    covdata.touch_file('zzz.py')
    self.assertEqual(covdata.lines('zzz.py'), [])
    self.assertIsNone(covdata.lines('no_such_file.py'))
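# Hedged helper sketch motivated by the test above: callers consuming
# CoverageData.lines() need to distinguish "measured, zero lines" ([]) from
# "never measured" (None). describe_file() is illustrative and not part of
# coverage.py.
from coverage import CoverageData

def describe_file(covdata, filename):
    lines = covdata.lines(filename)
    if lines is None:
        return "%s: not measured" % filename
    return "%s: %d executed line(s)" % (filename, len(lines))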
class Coverage(object): """Programmatic access to coverage.py. To use:: from coverage import Coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') Note: in keeping with Python custom, names starting with underscore are not part of the public API. They might stop working at any point. Please limit yourself to documented methods to avoid problems. """ # The stack of started Coverage instances. _instances = [] @classmethod def current(cls): """Get the latest started `Coverage` instance, if any. Returns: a `Coverage` instance, or None. .. versionadded:: 5.0 """ if cls._instances: return cls._instances[-1] else: return None def __init__( self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, concurrency=None, check_preimported=False, context=None, ): """ Many of these arguments duplicate and override values that can be provided in a configuration file. Parameters that are missing here will use values from the config file. `data_file` is the base name of the data file to use. The config value defaults to ".coverage". None can be provided to prevent writing a data file. `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what configuration file to read: * If it is ".coveragerc", it is interpreted as if it were True, for backward compatibility. * If it is a string, it is the name of the file to read. If the file can't be read, it is an error. * If it is True, then a few standard files names are tried (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for these files to not be found. * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results if these libraries are in use. Valid strings are "greenlet", "eventlet", "gevent", "multiprocessing", or "thread" (the default). This can also be a list of these strings. If `check_preimported` is true, then when coverage is started, the already-imported files will be checked to see if they should be measured by coverage. Importing measured files before coverage is started can mean that code is missed. 
`context` is a string to use as the :ref:`static context <static_contexts>` label for collected data. .. versionadded:: 4.0 The `concurrency` parameter. .. versionadded:: 4.2 The `concurrency` parameter can now be a list of strings. .. versionadded:: 5.0 The `check_preimported` and `context` parameters. """ # data_file=None means no disk file at all. data_file missing means # use the value from the config file. self._no_disk = data_file is None if data_file is _DEFAULT_DATAFILE: data_file = None # Build our configuration from a number of sources. self.config = read_coverage_config( config_file=config_file, data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, run_omit=omit, run_include=include, debug=debug, report_omit=omit, report_include=include, concurrency=concurrency, context=context, ) # This is injectable by tests. self._debug_file = None self._auto_load = self._auto_save = auto_data self._data_suffix_specified = data_suffix # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True self._warn_preimported_source = check_preimported # A record of all the warnings that have been issued. self._warnings = [] # Other instance attributes, set later. self._data = self._collector = None self._plugins = None self._inorout = None self._inorout_class = InOrOut self._data_suffix = self._run_suffix = None self._exclude_re = None self._debug = None self._file_mapper = None # State machine variables: # Have we initialized everything? self._inited = False self._inited_for_start = False # Have we started collecting and not stopped it? self._started = False # Have we written --debug output? self._wrote_debug = False # If we have sub-process measurement happening automatically, then we # want any explicit creation of a Coverage object to mean, this process # is already coverage-aware, so don't auto-measure it. By now, the # auto-creation of a Coverage object has already happened. But we can # find it and tell it not to save its data. if not env.METACOV: _prevent_sub_process_measurement() def _init(self): """Set all the initial state. This is called by the public methods to initialize state. This lets us construct a :class:`Coverage` object, then tweak its state before this function is called. """ if self._inited: return self._inited = True # Create and configure the debugging controller. COVERAGE_DEBUG_FILE # is an environment variable, the name of a file to append debug logs # to. self._debug = DebugControl(self.config.debug, self._debug_file) if "multiprocessing" in (self.config.concurrency or ()): # Multi-processing uses parallel for the subprocesses, so also use # it for the main process. self.config.parallel = True # _exclude_re is a dict that maps exclusion list names to compiled regexes. self._exclude_re = {} set_relative_directory() self._file_mapper = relative_filename if self.config.relative_files else abs_file # Load plugins self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) # Run configuring plugins. for plugin in self._plugins.configurers: # We need an object with set_option and get_option. Either self or # self.config will do. Choosing randomly stops people from doing # other things with those objects, against the public API. Yes, # this is a bit childish. 
:) plugin.configure([self, self.config][int(time.time()) % 2]) def _post_init(self): """Stuff to do after everything is initialized.""" if not self._wrote_debug: self._wrote_debug = True self._write_startup_debug() def _write_startup_debug(self): """Write out debug info at startup if needed.""" wrote_any = False with self._debug.without_callers(): if self._debug.should('config'): config_info = sorted(self.config.__dict__.items()) config_info = [(k, v) for k, v in config_info if not k.startswith('_')] write_formatted_info(self._debug, "config", config_info) wrote_any = True if self._debug.should('sys'): write_formatted_info(self._debug, "sys", self.sys_info()) for plugin in self._plugins: header = "sys: " + plugin._coverage_plugin_name info = plugin.sys_info() write_formatted_info(self._debug, header, info) wrote_any = True if wrote_any: write_formatted_info(self._debug, "end", ()) def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. """ disp = self._inorout.should_trace(filename, frame) if self._debug.should('trace'): self._debug.write(disposition_debug_msg(disp)) return disp def _check_include_omit_etc(self, filename, frame): """Check a file name against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. """ reason = self._inorout.check_include_omit_etc(filename, frame) if self._debug.should('trace'): if not reason: msg = "Including %r" % (filename, ) else: msg = "Not including %r: %s" % (filename, reason) self._debug.write(msg) return not reason def _warn(self, msg, slug=None): """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. """ if slug in self.config.disable_warnings: # Don't issue the warning return self._warnings.append(msg) if slug: msg = "%s (%s)" % (msg, slug) if self._debug.should('pid'): msg = "[%d] %s" % (os.getpid(), msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def get_option(self, option_name): """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. .. versionadded:: 4.0 """ return self.config.get_option(option_name) def set_option(self, option_name, value): """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with ``"run:branch"``. `value` is the new value for the option. This should be an appropriate Python value. For example, use True for booleans, not the string ``"True"``. As an example, calling:: cov.set_option("run:branch", True) has the same effect as this configuration file:: [run] branch = True .. versionadded:: 4.0 """ self.config.set_option(option_name, value) def load(self): """Load previously-collected coverage data from the data file.""" self._init() if self._collector: self._collector.reset() should_skip = self.config.parallel and not os.path.exists( self.config.data_file) if not should_skip: self._init_data(suffix=None) self._post_init() if not should_skip: self._data.read() def _init_for_start(self): """Initialization for start()""" # Construct the collector. 
concurrency = self.config.concurrency or () if "multiprocessing" in concurrency: if not patch_multiprocessing: raise CoverageException( # pragma: only jython "multiprocessing is not supported on this Python") patch_multiprocessing(rcfile=self.config.config_file) dycon = self.config.dynamic_context if not dycon or dycon == "none": context_switchers = [] elif dycon == "test_function": context_switchers = [should_start_context_test_function] else: raise CoverageException( "Don't understand dynamic_context setting: {!r}".format(dycon)) context_switchers.extend(plugin.dynamic_context for plugin in self._plugins.context_switchers) should_start_context = combine_context_switchers(context_switchers) self._collector = Collector( should_trace=self._should_trace, check_include=self._check_include_omit_etc, should_start_context=should_start_context, file_mapper=self._file_mapper, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=concurrency, ) suffix = self._data_suffix_specified if suffix or self.config.parallel: if not isinstance(suffix, string_class): # if data_suffix=True, use .machinename.pid.random suffix = True else: suffix = None self._init_data(suffix) self._collector.use_data(self._data, self.config.context) # Early warning if we aren't going to be able to support plugins. if self._plugins.file_tracers and not self._collector.supports_plugins: self._warn("Plugin file tracers (%s) aren't supported with %s" % ( ", ".join(plugin._coverage_plugin_name for plugin in self._plugins.file_tracers), self._collector.tracer_name(), )) for plugin in self._plugins.file_tracers: plugin._coverage_enabled = False # Create the file classifying substructure. self._inorout = self._inorout_class(warn=self._warn) self._inorout.configure(self.config) self._inorout.plugins = self._plugins self._inorout.disp_class = self._collector.file_disposition_class atexit.register(self._atexit) def _init_data(self, suffix): """Create a data file if we don't have one yet.""" if self._data is None: # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. ensure_dir_for_file(self.config.data_file) self._data = CoverageData( basename=self.config.data_file, suffix=suffix, warn=self._warn, debug=self._debug, no_disk=self._no_disk, ) def start(self): """Start measuring code coverage. Coverage measurement only occurs in functions called after :meth:`start` is invoked. Statements in the same scope as :meth:`start` won't be measured. Once you invoke :meth:`start`, you must also call :meth:`stop` eventually, or your process might not shut down cleanly. """ self._init() if not self._inited_for_start: self._inited_for_start = True self._init_for_start() self._post_init() # Issue warnings for possible problems. self._inorout.warn_conflicting_settings() # See if we think some code that would eventually be measured has # already been imported. 
if self._warn_preimported_source: self._inorout.warn_already_imported_files() if self._auto_load: self.load() self._collector.start() self._started = True self._instances.append(self) def stop(self): """Stop measuring code coverage.""" if self._instances: if self._instances[-1] is self: self._instances.pop() if self._started: self._collector.stop() self._started = False def _atexit(self): """Clean up on process shutdown.""" if self._debug.should("process"): self._debug.write("atexit: {!r}".format(self)) if self._started: self.stop() if self._auto_save: self.save() def erase(self): """Erase previously collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self._init() self._post_init() if self._collector: self._collector.reset() self._init_data(suffix=None) self._data.erase(parallel=self.config.parallel) self._data = None self._inited_for_start = False def switch_context(self, new_context): """Switch to a new dynamic context. `new_context` is a string to use as the :ref:`dynamic context <dynamic_contexts>` label for collected data. If a :ref:`static context <static_contexts>` is in use, the static and dynamic context labels will be joined together with a pipe character. Coverage collection must be started already. .. versionadded:: 5.0 """ if not self._started: # pragma: part started raise CoverageException( "Cannot switch context, coverage is not started") self._collector.switch_context(new_context) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data = self.get_data() data.write() def combine(self, data_paths=None, strict=False): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. `data_paths` is a list of files or directories from which data should be combined. 
If no list is passed, then the data files from the directory indicated by the current data file (probably the current directory) will be combined. If `strict` is true, then it is an error to attempt to combine when there are no data files to combine. .. versionadded:: 4.0 The `data_paths` parameter. .. versionadded:: 4.3 The `strict` parameter. """ self._init() self._init_data(suffix=None) self._post_init() self.get_data() aliases = None if self.config.paths: aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) combine_parallel_data(self._data, aliases=aliases, data_paths=data_paths, strict=strict) def get_data(self): """Get the collected data. Also warn about various problems collecting data. Returns a :class:`coverage.CoverageData`, the collected coverage data. .. versionadded:: 4.0 """ self._init() self._init_data(suffix=None) self._post_init() if self._collector and self._collector.flush_data(): self._post_save_work() return self._data def _post_save_work(self): """After saving data, look for warnings, post-work, etc. Warn about things that should have happened but didn't. Look for unexecuted files. """ # If there are still entries in the source_pkgs_unmatched list, # then we never encountered those packages. if self._warn_unimported_source: self._inorout.warn_unimported_source() # Find out if we got any data. if not self._data and self._warn_no_data: self._warn("No data was collected.", slug="no-data-collected") # Touch all the files that could have executed, so that we can # mark completely unexecuted files as 0% covered. if self._data: for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files( ): file_path = self._file_mapper(file_path) self._data.touch_file(file_path, plugin_name) if self.config.note: self._warn("The '[run] note' setting is no longer supported.") # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ # All reporting comes through here, so do reporting initialization. 
self._init() Numbers.set_precision(self.config.precision) self._post_init() data = self.get_data() if not isinstance(it, FileReporter): it = self._get_file_reporter(it) return Analysis(data, it, self._file_mapper) def _get_file_reporter(self, morf): """Get a FileReporter for a module or file name.""" plugin = None file_reporter = "python" if isinstance(morf, string_class): mapped_morf = self._file_mapper(morf) plugin_name = self._data.file_tracer(mapped_morf) if plugin_name: plugin = self._plugins.get(plugin_name) if plugin: file_reporter = plugin.file_reporter(mapped_morf) if file_reporter is None: raise CoverageException( "Plugin %r did not provide a file reporter for %r." % (plugin._coverage_plugin_name, morf)) if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) return file_reporter def _get_file_reporters(self, morfs=None): """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return the list of FileReporters. If `morfs` is a single module or file name, this returns a list of one FileReporter. If `morfs` is empty or None, then the list of all files measured is used to find the FileReporters. """ if not morfs: morfs = self._data.measured_files() # Be sure we have a collection. if not isinstance(morfs, (list, tuple, set)): morfs = [morfs] file_reporters = [self._get_file_reporter(morf) for morf in morfs] return file_reporters def report( self, morfs=None, show_missing=None, ignore_errors=None, file=None, omit=None, include=None, skip_covered=None, contexts=None, skip_empty=None, ): """Write a textual summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. If `show_missing` is true, then details of which lines or branches are missing will be included in the report. If `ignore_errors` is true, then a failure while reporting a single file will not stop the entire report. `file` is a file-like object, suitable for writing. `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. If `skip_covered` is true, don't report on files with 100% coverage. If `skip_empty` is true, don't report on empty files (those that have no statements). `contexts` is a list of regular expressions. Only data from :ref:`dynamic contexts <dynamic_contexts>` that match one of those expressions (using :func:`re.search <python:re.search>`) will be included in the report. All of the arguments default to the settings read from the :ref:`configuration file <config>`. Returns a float, the total percentage covered. .. versionadded:: 4.0 The `skip_covered` parameter. .. versionadded:: 5.0 The `contexts` and `skip_empty` parameters. """ with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, show_missing=show_missing, skip_covered=skip_covered, report_contexts=contexts, skip_empty=skip_empty, ): reporter = SummaryReporter(self) return reporter.report(morfs, outfile=file) def annotate( self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, contexts=None, ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". 
See :meth:`report` for other arguments. """ with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, report_contexts=contexts, ): reporter = AnnotateReporter(self) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None, skip_covered=None, show_contexts=None, contexts=None, skip_empty=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See :meth:`report` for other arguments. Returns a float, the total percentage covered. .. note:: The HTML report files are generated incrementally based on the source files and coverage results. If you modify the report files, the changes will not be considered. You should be careful about changing the files in the report folder. """ with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, html_dir=directory, extra_css=extra_css, html_title=title, skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts, skip_empty=skip_empty, ): reporter = HtmlReporter(self) return reporter.report(morfs) def xml_report( self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, contexts=None, ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, xml_output=outfile, report_contexts=contexts, ): return render_report(self.config.xml_output, XmlReporter(self), morfs) def json_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, contexts=None, pretty_print=None, show_contexts=None): """Generate a JSON report of coverage results. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. .. 
versionadded:: 5.0 """ with override_config(self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print, json_show_contexts=show_contexts): return render_report(self.config.json_output, JsonReporter(self), morfs) def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod self._init() self._post_init() def plugin_info(plugins): """Make an entry for the sys_info from a list of plug-ins.""" entries = [] for plugin in plugins: entry = plugin._coverage_plugin_name if not plugin._coverage_enabled: entry += " (disabled)" entries.append(entry) return entries info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('tracer', self._collector.tracer_name() if self._collector else "-none-"), ('CTracer', 'available' if CTracer else "unavailable"), ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)), ('plugins.configurers', plugin_info(self._plugins.configurers)), ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)), ('configs_attempted', self.config.attempted_config_files), ('configs_read', self.config.config_files_read), ('config_file', self.config.config_file), ('config_contents', repr(self.config._config_contents) if self.config._config_contents else '-none-'), ('data_file', self._data.filename if self._data else "-none-"), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', platform.python_implementation()), ('executable', sys.executable), ('def_encoding', sys.getdefaultencoding()), ('fs_encoding', sys.getfilesystemencoding()), ('pid', os.getpid()), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted(("%s = %s" % (k, v)) for k, v in iitems(os.environ) if any(slug in k for slug in ("COV", "PY")))), ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))), ] if self._inorout: info.extend(self._inorout.sys_info()) info.extend(CoverageData.sys_info()) return info
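# A minimal usage sketch of the programmatic API documented above, assuming the
# constructor arguments and methods behave as described in their docstrings.
# ``my_module`` and its ``main()`` function are hypothetical placeholders for
# the code being measured; only documented public methods (start, stop, save,
# switch_context, set_option, report, html_report) are used.

def _example_programmatic_coverage():
    """Illustrative only: measure a hypothetical module and report on it."""
    from coverage import Coverage

    cov = Coverage(branch=True, context="unit-tests")   # static context label
    cov.set_option("run:omit", ["*/tests/*"])           # same as "[run] omit"
    cov.start()
    try:
        import my_module                                # hypothetical module
        cov.switch_context("main-run")                  # dynamic context label
        my_module.main()
    finally:
        cov.stop()
        cov.save()
    total = cov.report(show_missing=True)               # text summary to stdout
    cov.html_report(directory="covhtml")                # writes covhtml/index.html
    return total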
class coverage(object): """Programmatic access to Coverage. To use:: from coverage import coverage cov = coverage() cov.start() #.. blah blah (run your code) blah blah .. cov.stop() cov.html_report(directory='covhtml') """ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what config file to read. If it is a string, it is the name of the config file to read. If it is True, then a standard file is read (".coveragerc"). If it is False, then no file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of filename patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. """ from coverage import __version__ # A record of all the warnings that have been issued. self._warnings = [] # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() # 2: from the coveragerc file: if config_file: if config_file is True: config_file = ".coveragerc" try: self.config.from_file(config_file) except ValueError: _, err, _ = sys.exc_info() raise CoverageException( "Couldn't read config file %s: %s" % (config_file, err) ) # 3: from environment variables: self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file # 4: from constructor arguments: if isinstance(omit, string_class): omit = [omit] if isinstance(include, string_class): include = [include] self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include ) self.auto_data = auto_data self.atexit_registered = False # _exclude_re is a dict mapping exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() self.file_locator = FileLocator() # The source argument can be directories or package names. 
self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(self.file_locator.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = self._prep_patterns(self.config.omit) self.include = self._prep_patterns(self.config.include) self.collector = Collector( self._should_trace, timid=self.config.timid, branch=self.config.branch, warn=self._warn ) # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. if data_suffix or self.config.parallel: if not isinstance(data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random data_suffix = True else: data_suffix = None self.data_suffix = None self.run_suffix = data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData( basename=self.config.data_file, collector="coverage v%s" % __version__ ) # The dirs for files considered "installed with the interpreter". self.pylib_dirs = [] if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, os, random, socket): if hasattr(m, "__file__"): m_dir = self._canonical_dir(m.__file__) if m_dir not in self.pylib_dirs: self.pylib_dirs.append(m_dir) # To avoid tracing the coverage code itself, we skip anything located # where we are. self.cover_dir = self._canonical_dir(__file__) # The matchers for _should_trace, created when tracing starts. self.source_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Only _harvest_data once per measurement cycle. self._harvested = False # Set the reporting precision. Numbers.set_precision(self.config.precision) # When tearing down the coverage object, modules can become None. # Saving the modules as object attributes avoids problems, but it is # quite ad-hoc which modules need to be saved and which references # need to use the object attributes. self.socket = socket self.os = os self.random = random def _canonical_dir(self, f): """Return the canonical directory of the file `f`.""" return os.path.split(self.file_locator.canonical_filename(f))[0] def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] return filename def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename` This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a canonicalized filename if it should be traced, False if it should not. """ if os is None: return False if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway.
return False if filename.endswith(".html"): # Jinja and maybe other templating systems compile templates into # Python code, but use the template filename as the filename in # the compiled code. Of course, those filenames are useless later # so don't bother collecting. TODO: How should we really separate # out good file extensions from bad? return False self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) # If the user specified source, then that's authoritative about what to # measure. If they didn't, then we have to exclude the stdlib and # coverage.py directories. if self.source_match: if not self.source_match.match(canonical): return False else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(canonical): return False # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(canonical): return False # Check the file against the include and omit patterns. if self.include_match and not self.include_match.match(canonical): return False if self.omit_match and self.omit_match.match(canonical): return False return canonical # To log what should_trace returns, change this to "if 1:" if 0: _real_should_trace = _should_trace def _should_trace(self, filename, frame): # pylint: disable=E0102 """A logging decorator around the real _should_trace function.""" ret = self._real_should_trace(filename, frame) print("should_trace: %r -> %r" % (filename, ret)) return ret def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def _prep_patterns(self, patterns): """Prepare the file patterns for use in a `FnmatchMatcher`. If a pattern starts with a wildcard, it is used as a pattern as-is. If it does not start with a wildcard, then it is made absolute with the current directory. If `patterns` is None, an empty list is returned. """ patterns = patterns or [] prepped = [] for p in patterns or []: if p.startswith("*") or p.startswith("?"): prepped.append(p) else: prepped.append(self.file_locator.abs_file(p)) return prepped def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: self._warn("Module %s has no Python source."
% pkg) else: d, f = os.path.split(pkg_file) if f.startswith('__init__.'): # This is actually a package, return the directory. pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) self.source.append(pkg_file) self.source_match.add(pkg_file) for pkg in found: self.source_pkgs.remove(pkg) def use_cache(self, usecache): """Control the use of a data file (incorrectly called a cache). `usecache` is true or false, whether to read and write data on disk. """ self.data.usefile(usecache) def load(self): """Load previously-collected coverage data from the data file.""" self.collector.reset() self.data.read() def start(self): """Start measuring code coverage.""" if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self.auto_data: self.load() # Save coverage data when Python exits. if not self.atexit_registered: atexit.register(self.save) self.atexit_registered = True # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) self._harvested = False self.collector.start() def stop(self): """Stop measuring code coverage.""" self.collector.stop() self._harvest_data() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self.collector.reset() self.data.erase() def clear_exclude(self, which='exclude'): """Clear the exclude list.""" setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See `exclude` for the lists that are available, and their meaning. """ return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. 
We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. data_suffix = "%s.%s.%06d" % ( self.socket.gethostname(), self.os.getpid(), self.random.randint(0, 99999) ) self._harvest_data() self.data.write(suffix=data_suffix) def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ self.data.combine_parallel_data() def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._harvested: self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): self.data.touch_file(py_file) self._harvested = True # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, analysis.statements, analysis.excluded, analysis.missing, analysis.missing_formatted() ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it) def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. """ self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = SummaryReporter( self, show_missing, self.config.ignore_errors ) reporter.report(morfs, outfile=file, config=self.config) def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. 
""" self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config.ignore_errors) reporter.report(morfs, config=self.config, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Generate an HTML report. See `coverage.report()` for other arguments. """ self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, ) reporter = HtmlReporter(self, self.config.ignore_errors) reporter.report(morfs, config=self.config) def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. """ self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: reporter = XmlReporter(self, self.config.ignore_errors) reporter.report(morfs, outfile=outfile, config=self.config) finally: if file_to_close: file_to_close.close() def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('cwd', os.getcwd()), ('path', sys.path), ('environment', [ ("%s = %s" % (k, v)) for k, v in os.environ.items() if re.search("^COV|^PY", k) ]), ] return info
class Coverage(object): """Programmatic access to coverage.py. To use:: from coverage import Coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__( self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, concurrency=None, ): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what configuration file to read: * If it is ".coveragerc", it is interpreted as if it were True, for backward compatibility. * If it is a string, it is the name of the file to read. If the file can't be read, it is an error. * If it is True, then a few standard files names are tried (".coveragerc", "setup.cfg"). It is not an error for these files to not be found. * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results. Valid strings are "greenlet", "eventlet", "gevent", "multiprocessing", or "thread" (the default). .. versionadded:: 4.0 The `concurrency` parameter. """ # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() # 2: from the rcfile, .coveragerc or setup.cfg file: if config_file: did_read_rc = False # Some API users were specifying ".coveragerc" to mean the same as # True, so make it so. 
if config_file == ".coveragerc": config_file = True specified_file = (config_file is not True) if not specified_file: config_file = ".coveragerc" did_read_rc = self.config.from_file(config_file) if not did_read_rc: if specified_file: raise CoverageException( "Couldn't read '%s' as a config file" % config_file ) self.config.from_file("setup.cfg", section_prefix="coverage:") # 3: from environment variables: env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file debugs = os.environ.get('COVERAGE_DEBUG') if debugs: self.config.debug.extend(debugs.split(",")) # 4: from constructor arguments: self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, concurrency=concurrency, ) self._debug_file = None self._auto_data = auto_data self._data_suffix = data_suffix # The matchers for _should_trace. self.source_match = None self.source_pkgs_match = None self.pylib_match = self.cover_match = None self.include_match = self.omit_match = None # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True # A record of all the warnings that have been issued. self._warnings = [] # Other instance attributes, set later. self.omit = self.include = self.source = None self.source_pkgs = None self.data = self.data_files = self.collector = None self.plugins = None self.pylib_dirs = self.cover_dirs = None self.data_suffix = self.run_suffix = None self._exclude_re = None self.debug = None # State machine variables: # Have we initialized everything? self._inited = False # Have we started collecting and not stopped it? self._started = False # Have we measured some data and not harvested it? self._measured = False def _init(self): """Set all the initial state. This is called by the public methods to initialize state. This lets us construct a :class:`Coverage` object, then tweak its state before this function is called. """ if self._inited: return # Create and configure the debugging controller. COVERAGE_DEBUG_FILE # is an environment variable, the name of a file to append debug logs # to. if self._debug_file is None: debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE") if debug_file_name: self._debug_file = open(debug_file_name, "a") else: self._debug_file = sys.stderr self.debug = DebugControl(self.config.debug, self._debug_file) # Load plugins self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug) # _exclude_re is a dict that maps exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() files.set_relative_directory() # The source argument can be directories or package names. self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): self.source.append(files.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) concurrency = self.config.concurrency if concurrency == "multiprocessing": patch_multiprocessing() concurrency = None self.collector = Collector( should_trace=self._should_trace, check_include=self._check_include_omit_etc, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=concurrency, ) # Early warning if we aren't going to be able to support plugins. 
if self.plugins.file_tracers and not self.collector.supports_plugins: self._warn( "Plugin file tracers (%s) aren't supported with %s" % ( ", ".join( plugin._coverage_plugin_name for plugin in self.plugins.file_tracers ), self.collector.tracer_name(), ) ) for plugin in self.plugins.file_tracers: plugin._coverage_enabled = False # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. if self._data_suffix or self.config.parallel: if not isinstance(self._data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random self._data_suffix = True else: self._data_suffix = None self.data_suffix = None self.run_suffix = self._data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. self.data = CoverageData(debug=self.debug) self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn) # The directories for files considered "installed with the interpreter". self.pylib_dirs = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in (atexit, inspect, os, platform, re, _structseq, traceback): if m is not None and hasattr(m, "__file__"): self.pylib_dirs.add(self._canonical_dir(m)) if _structseq and not hasattr(_structseq, '__file__'): # PyPy 2.4 has no __file__ in the builtin modules, but the code # objects still have the file names. So dig into one to find # the path to exclude. structseq_new = _structseq.structseq_new try: structseq_file = structseq_new.func_code.co_filename except AttributeError: structseq_file = structseq_new.__code__.co_filename self.pylib_dirs.add(self._canonical_dir(structseq_file)) # To avoid tracing the coverage.py code itself, we skip anything # located where we are. self.cover_dirs = [self._canonical_dir(__file__)] if env.TESTING: # When testing, we use PyContracts, which should be considered # part of coverage.py, and it uses six. Exclude those directories # just as we exclude ourselves. import contracts, six for mod in [contracts, six]: self.cover_dirs.append(self._canonical_dir(mod)) # Set the reporting precision. Numbers.set_precision(self.config.precision) atexit.register(self._atexit) self._inited = True # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) self.source_pkgs_match = ModuleMatcher(self.source_pkgs) else: if self.cover_dirs: self.cover_match = TreeMatcher(self.cover_dirs) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. 
wrote_any = False if self.debug.should('config'): config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info("config", config_info) wrote_any = True if self.debug.should('sys'): self.debug.write_formatted_info("sys", self.sys_info()) for plugin in self.plugins: header = "sys: " + plugin._coverage_plugin_name info = plugin.sys_info() self.debug.write_formatted_info(header, info) wrote_any = True if wrote_any: self.debug.write_formatted_info("end", ()) def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" morf_filename = PythonFileReporter(morf, self).filename return os.path.split(morf_filename)[0] def _source_for_file(self, filename): """Return the source file for `filename`. Given a file name being traced, return the best guess as to the source file to attribute it to. """ if filename.endswith(".py"): # .py files are themselves source files. return filename elif filename.endswith((".pyc", ".pyo")): # Bytecode files probably have source files near them. py_filename = filename[:-1] if os.path.exists(py_filename): # Found a .py file, use that. return py_filename if env.WINDOWS: # On Windows, it could be a .pyw file. pyw_filename = py_filename + "w" if os.path.exists(pyw_filename): return pyw_filename # Didn't find source, but it's probably the .py file we want. return py_filename elif filename.endswith("$py.class"): # Jython is easy to guess. return filename[:-9] + ".py" # No idea, just use the file name as-is. return filename def _name_for_module(self, module_globals, filename): """Get the name of the module for a set of globals and file name. For configurability's sake, we allow __main__ modules to be matched by their importable name. If loaded via runpy (aka -m), we can usually recover the "original" full dotted module name, otherwise, we resort to interpreting the file name to get the module's name. In the case that the module name can't be determined, None is returned. """ dunder_name = module_globals.get('__name__', None) if isinstance(dunder_name, str) and dunder_name != '__main__': # This is the usual case: an imported module. return dunder_name loader = module_globals.get('__loader__', None) for attrname in ('fullname', 'name'): # attribute renamed in py3.2 if hasattr(loader, attrname): fullname = getattr(loader, attrname) else: continue if isinstance(fullname, str) and fullname != '__main__': # Module loaded via: runpy -m return fullname # Script as first argument to Python command line. inspectedname = inspect.getmodulename(filename) if inspectedname is not None: return inspectedname else: return dunder_name def _should_trace_internal(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a FileDisposition object. """ original_filename = filename disp = _disposition_init(self.collector.file_disposition_class, filename) def nope(disp, reason): """Simple helper to make it easy to return NO.""" disp.trace = False disp.reason = reason return disp # Compiled Python files have two file names: frame.f_code.co_filename is # the file name at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. 
dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) if original_filename and not original_filename.startswith('<'): orig = os.path.basename(original_filename) if orig != os.path.basename(filename): # Files shouldn't be renamed when moved. This happens when # exec'ing code. If it seems like something is wrong with # the frame's file name, then just use the original. filename = original_filename if not filename: # Empty string is pretty useless. return nope(disp, "empty string isn't a file name") if filename.startswith('memory:'): return nope(disp, "memory isn't traceable") if filename.startswith('<'): # Lots of non-file execution is represented with artificial # file names like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return nope(disp, "not a real file name") # pyexpat does a dumb thing, calling the trace function explicitly from # C code with a C file name. if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename): return nope(disp, "pyexpat lies about itself") # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = files.canonical_filename(filename) disp.canonical_filename = canonical # Try the plugins, see if they have an opinion about the file. plugin = None for plugin in self.plugins.file_tracers: if not plugin._coverage_enabled: continue try: file_tracer = plugin.file_tracer(canonical) if file_tracer is not None: file_tracer._coverage_plugin = plugin disp.trace = True disp.file_tracer = file_tracer if file_tracer.has_dynamic_source_filename(): disp.has_dynamic_filename = True else: disp.source_filename = files.canonical_filename( file_tracer.source_filename() ) break except Exception: self._warn( "Disabling plugin %r due to an exception:" % ( plugin._coverage_plugin_name ) ) traceback.print_exc() plugin._coverage_enabled = False continue else: # No plugin wanted it: it's Python. disp.trace = True disp.source_filename = canonical if not disp.has_dynamic_filename: if not disp.source_filename: raise CoverageException( "Plugin %r didn't set source_filename for %r" % (plugin, disp.original_filename) ) reason = self._check_include_omit_etc_internal( disp.source_filename, frame, ) if reason: nope(disp, reason) return disp def _check_include_omit_etc_internal(self, filename, frame): """Check a file name against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason why. None means no reason found to not trace. """ modulename = self._name_for_module(frame.f_globals, filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: if self.source_pkgs_match.match(modulename): if modulename in self.source_pkgs: self.source_pkgs.remove(modulename) return None # There's no reason to skip this file. if not self.source_match.match(filename): return "falls outside the --source trees" elif self.include_match: if not self.include_match.match(filename): return "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. 
if self.pylib_match and self.pylib_match.match(filename): return "is in the stdlib" # We exclude the coverage.py code itself, since a little of it # will be measured otherwise. if self.cover_match and self.cover_match.match(filename): return "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(filename): return "is inside an --omit pattern" # No reason found to skip this file. return None def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. """ disp = self._should_trace_internal(filename, frame) if self.debug.should('trace'): self.debug.write(_disposition_debug_msg(disp)) return disp def _check_include_omit_etc(self, filename, frame): """Check a file name against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. """ reason = self._check_include_omit_etc_internal(filename, frame) if self.debug.should('trace'): if not reason: msg = "Including %r" % (filename,) else: msg = "Not including %r: %s" % (filename, reason) self.debug.write(msg) return not reason def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) if self.debug.should('pid'): msg = "[%d] %s" % (os.getpid(), msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def get_option(self, option_name): """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. .. versionadded:: 4.0 """ return self.config.get_option(option_name) def set_option(self, option_name, value): """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with ``"run:branch"``. `value` is the new value for the option. This should be a Python value where appropriate. For example, use True for booleans, not the string ``"True"``. As an example, calling:: cov.set_option("run:branch", True) has the same effect as this configuration file:: [run] branch = True .. versionadded:: 4.0 """ self.config.set_option(option_name, value) def use_cache(self, usecache): """Obsolete method.""" self._init() if not usecache: self._warn("use_cache(False) is no longer supported.") def load(self): """Load previously-collected coverage data from the data file.""" self._init() self.collector.reset() self.data_files.read(self.data) def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after :meth:`start` is invoked. Statements in the same scope as :meth:`start` won't be measured. Once you invoke :meth:`start`, you must also call :meth:`stop` eventually, or your process might not shut down cleanly. """ self._init() if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. 
self.data_suffix = self.run_suffix if self._auto_data: self.load() self.collector.start() self._started = True self._measured = True def stop(self): """Stop measuring code coverage.""" if self._started: self.collector.stop() self._started = False def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self._auto_data: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self._init() self.collector.reset() self.data.erase() self.data_files.erase(parallel=self.config.parallel) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" self._init() self.get_data() self.data_files.write(self.data, suffix=self.data_suffix) def combine(self, data_paths=None): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. `data_paths` is a list of files or directories from which data should be combined. If no list is passed, then the data files from the directory indicated by the current data file (probably the current directory) will be combined. .. versionadded:: 4.0 The `data_paths` parameter. """ self._init() self.get_data() aliases = None if self.config.paths: aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths) def get_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. Returns a :class:`coverage.CoverageData`, the collected coverage data. .. 
versionadded:: 4.0 """ self._init() if not self._measured: return self.data self.collector.save_data(self.data) # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: if pkg not in sys.modules: self._warn("Module %s was never imported." % pkg) elif not ( hasattr(sys.modules[pkg], '__file__') and os.path.exists(sys.modules[pkg].__file__) ): self._warn("Module %s has no Python source." % pkg) else: self._warn("Module %s was previously imported, but not measured." % pkg) # Find out if we got any data. if not self.data and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): py_file = files.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(py_file) if self.config.note: self.data.add_run_info(note=self.config.note) self._measured = False return self.data # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ self._init() analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self.get_data() if not isinstance(it, FileReporter): it = self._get_file_reporter(it) return Analysis(self.data, it) def _get_file_reporter(self, morf): """Get a FileReporter for a module or file name.""" plugin = None file_reporter = "python" if isinstance(morf, string_class): abs_morf = abs_file(morf) plugin_name = self.data.file_tracer(abs_morf) if plugin_name: plugin = self.plugins.get(plugin_name) if plugin: file_reporter = plugin.file_reporter(abs_morf) if file_reporter is None: raise CoverageException( "Plugin %r did not provide a file reporter for %r." % ( plugin._coverage_plugin_name, morf ) ) if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) return file_reporter def _get_file_reporters(self, morfs=None): """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return the list of FileReporters. If `morfs` is a single module or file name, this returns a list of one FileReporter. If `morfs` is empty or None, then the list of all files measured is used to find the FileReporters. """ if not morfs: morfs = self.data.measured_files() # Be sure we have a list. 
if not isinstance(morfs, (list, tuple)): morfs = [morfs] file_reporters = [] for morf in morfs: file_reporter = self._get_file_reporter(morf) file_reporters.append(file_reporter) return file_reporters def report( self, morfs=None, show_missing=None, ignore_errors=None, file=None, # pylint: disable=redefined-builtin omit=None, include=None, skip_covered=None, ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate( self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See :meth:`report` for other arguments. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report( self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: # Ensure that the output directory is created; done here # because this report pre-opens the output file. # HTMLReport does this using the Report plumbing because # its task is more complex, being multiple files. 
output_dir = os.path.dirname(self.config.xml_output) if output_dir and not os.path.isdir(output_dir): os.makedirs(output_dir) open_kwargs = {} if env.PY3: open_kwargs['encoding'] = 'utf8' outfile = open(self.config.xml_output, "w", **open_kwargs) file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod self._init() ft_plugins = [] for ft in self.plugins.file_tracers: ft_name = ft._coverage_plugin_name if not ft._coverage_enabled: ft_name += " (disabled)" ft_plugins.append(ft_name) info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dirs', self.cover_dirs), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('plugins.file_tracers', ft_plugins), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data_files.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', platform.python_implementation()), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted( ("%s = %s" % (k, v)) for k, v in iitems(os.environ) if k.startswith(("COV", "PY")) )), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] matcher_names = [ 'source_match', 'source_pkgs_match', 'include_match', 'omit_match', 'cover_match', 'pylib_match', ] for matcher_name in matcher_names: matcher = getattr(self, matcher_name) if matcher: matcher_info = matcher.info() else: matcher_info = '-none-' info.append((matcher_name, matcher_info)) return info
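# --- Illustrative sketch, not part of coverage.py itself ---------------------
# A minimal example of the programmatic API documented in the class above
# (start/stop, save, and the report methods). The module name "my_module" and
# the output paths are hypothetical placeholders; everything else is the
# public API shown in the docstrings above.
def _example_programmatic_use():
    from coverage import Coverage

    cov = Coverage(branch=True, omit=["*/tests/*"])
    cov.exclude(r"pragma: no cover")    # add to the default "exclude" regex list
    cov.start()

    import my_module                    # hypothetical module to be measured
    my_module.main()                    # hypothetical entry point

    cov.stop()
    cov.save()                          # write the .coverage data file

    # Each reporting method reads the collected data; report() and
    # html_report() return the total percentage covered as a float.
    total = cov.report(show_missing=True)
    cov.html_report(directory="covhtml")
    cov.xml_report(outfile="coverage.xml")
    return total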
class Coverage(object): """Programmatic access to coverage.py. To use:: from coverage import Coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ def __init__( self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, concurrency=None, check_preimported=False, context=None, ): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what configuration file to read: * If it is ".coveragerc", it is interpreted as if it were True, for backward compatibility. * If it is a string, it is the name of the file to read. If the file can't be read, it is an error. * If it is True, then a few standard file names are tried (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for these files to not be found. * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results if these libraries are in use. Valid strings are "greenlet", "eventlet", "gevent", "multiprocessing", or "thread" (the default). This can also be a list of these strings. If `check_preimported` is true, then when coverage is started, the already-imported files will be checked to see if they should be measured by coverage. Importing measured files before coverage is started can mean that code is missed. `context` is a string to use as the context label for collected data. .. versionadded:: 4.0 The `concurrency` parameter. .. versionadded:: 4.2 The `concurrency` parameter can now be a list of strings. .. versionadded:: 5.0 The `check_preimported` and `context` parameters. """ # Build our configuration from a number of sources. self.config = read_coverage_config( config_file=config_file, data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, run_omit=omit, run_include=include, debug=debug, report_omit=omit, report_include=include, concurrency=concurrency, context=context, ) # This is injectable by tests.
self._debug_file = None self._auto_load = self._auto_save = auto_data self._data_suffix_specified = data_suffix # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True self._warn_preimported_source = check_preimported # A record of all the warnings that have been issued. self._warnings = [] # Other instance attributes, set later. self._data = self._collector = None self._plugins = None self._inorout = None self._inorout_class = InOrOut self._data_suffix = self._run_suffix = None self._exclude_re = None self._debug = None # State machine variables: # Have we initialized everything? self._inited = False self._inited_for_start = False # Have we started collecting and not stopped it? self._started = False # Have we written --debug output? self._wrote_debug = False # If we have sub-process measurement happening automatically, then we # want any explicit creation of a Coverage object to mean, this process # is already coverage-aware, so don't auto-measure it. By now, the # auto-creation of a Coverage object has already happened. But we can # find it and tell it not to save its data. if not env.METACOV: _prevent_sub_process_measurement() def _init(self): """Set all the initial state. This is called by the public methods to initialize state. This lets us construct a :class:`Coverage` object, then tweak its state before this function is called. """ if self._inited: return self._inited = True # Create and configure the debugging controller. COVERAGE_DEBUG_FILE # is an environment variable, the name of a file to append debug logs # to. self._debug = DebugControl(self.config.debug, self._debug_file) # _exclude_re is a dict that maps exclusion list names to compiled regexes. self._exclude_re = {} set_relative_directory() # Load plugins self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) # Run configuring plugins. for plugin in self._plugins.configurers: # We need an object with set_option and get_option. Either self or # self.config will do. Choosing randomly stops people from doing # other things with those objects, against the public API. Yes, # this is a bit childish. :) plugin.configure([self, self.config][int(time.time()) % 2]) def _post_init(self): """Stuff to do after everything is initialized.""" if not self._wrote_debug: self._wrote_debug = True self._write_startup_debug() def _write_startup_debug(self): """Write out debug info at startup if needed.""" wrote_any = False with self._debug.without_callers(): if self._debug.should('config'): config_info = sorted(self.config.__dict__.items()) config_info = [(k, v) for k, v in config_info if not k.startswith('_')] write_formatted_info(self._debug, "config", config_info) wrote_any = True if self._debug.should('sys'): write_formatted_info(self._debug, "sys", self.sys_info()) for plugin in self._plugins: header = "sys: " + plugin._coverage_plugin_name info = plugin.sys_info() write_formatted_info(self._debug, header, info) wrote_any = True if wrote_any: write_formatted_info(self._debug, "end", ()) def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. """ disp = self._inorout.should_trace(filename, frame) if self._debug.should('trace'): self._debug.write(disposition_debug_msg(disp)) return disp def _check_include_omit_etc(self, filename, frame): """Check a file name against the include/omit/etc, rules, verbosely. 
Returns a boolean: True if the file should be traced, False if not. """ reason = self._inorout.check_include_omit_etc(filename, frame) if self._debug.should('trace'): if not reason: msg = "Including %r" % (filename,) else: msg = "Not including %r: %s" % (filename, reason) self._debug.write(msg) return not reason def _warn(self, msg, slug=None): """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. """ if slug in self.config.disable_warnings: # Don't issue the warning return self._warnings.append(msg) if slug: msg = "%s (%s)" % (msg, slug) if self._debug.should('pid'): msg = "[%d] %s" % (os.getpid(), msg) sys.stderr.write("Coverage.py warning: %s\n" % msg) def get_option(self, option_name): """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. .. versionadded:: 4.0 """ return self.config.get_option(option_name) def set_option(self, option_name, value): """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with ``"run:branch"``. `value` is the new value for the option. This should be an appropriate Python value. For example, use True for booleans, not the string ``"True"``. As an example, calling:: cov.set_option("run:branch", True) has the same effect as this configuration file:: [run] branch = True .. versionadded:: 4.0 """ self.config.set_option(option_name, value) def load(self): """Load previously-collected coverage data from the data file.""" self._init() if self._collector: self._collector.reset() should_skip = self.config.parallel and not os.path.exists(self.config.data_file) if not should_skip: self._init_data(suffix=None) self._post_init() if not should_skip: self._data.read() def _init_for_start(self): """Initialization for start()""" # Construct the collector. concurrency = self.config.concurrency or [] if "multiprocessing" in concurrency: if not patch_multiprocessing: raise CoverageException( # pragma: only jython "multiprocessing is not supported on this Python" ) patch_multiprocessing(rcfile=self.config.config_file) # Multi-processing uses parallel for the subprocesses, so also use # it for the main process. self.config.parallel = True if self.config.dynamic_context is None: context_switchers = [] elif self.config.dynamic_context == "test_function": context_switchers = [should_start_context_test_function] else: raise CoverageException( "Don't understand dynamic_context setting: {!r}".format(self.config.dynamic_context) ) context_switchers.extend( plugin.dynamic_context for plugin in self._plugins.context_switchers ) should_start_context = combine_context_switchers(context_switchers) self._collector = Collector( should_trace=self._should_trace, check_include=self._check_include_omit_etc, should_start_context=should_start_context, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=concurrency, ) suffix = self._data_suffix_specified if suffix or self.config.parallel: if not isinstance(suffix, string_class): # if data_suffix=True, use .machinename.pid.random suffix = True else: suffix = None self._init_data(suffix) self._collector.use_data(self._data, self.config.context) # Early warning if we aren't going to be able to support plugins. 
if self._plugins.file_tracers and not self._collector.supports_plugins: self._warn( "Plugin file tracers (%s) aren't supported with %s" % ( ", ".join( plugin._coverage_plugin_name for plugin in self._plugins.file_tracers ), self._collector.tracer_name(), ) ) for plugin in self._plugins.file_tracers: plugin._coverage_enabled = False # Create the file classifying substructure. self._inorout = self._inorout_class(warn=self._warn) self._inorout.configure(self.config) self._inorout.plugins = self._plugins self._inorout.disp_class = self._collector.file_disposition_class atexit.register(self._atexit) def _init_data(self, suffix): """Create a data file if we don't have one yet.""" if self._data is None: # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. ensure_dir_for_file(self.config.data_file) self._data = CoverageData( basename=self.config.data_file, suffix=suffix, warn=self._warn, debug=self._debug, ) def start(self): """Start measuring code coverage. Coverage measurement only occurs in functions called after :meth:`start` is invoked. Statements in the same scope as :meth:`start` won't be measured. Once you invoke :meth:`start`, you must also call :meth:`stop` eventually, or your process might not shut down cleanly. """ self._init() if not self._inited_for_start: self._inited_for_start = True self._init_for_start() self._post_init() # Issue warnings for possible problems. self._inorout.warn_conflicting_settings() # See if we think some code that would eventually be measured has # already been imported. if self._warn_preimported_source: self._inorout.warn_already_imported_files() if self._auto_load: self.load() self._collector.start() self._started = True def stop(self): """Stop measuring code coverage.""" if self._started: self._collector.stop() self._started = False def _atexit(self): """Clean up on process shutdown.""" if self._debug.should("process"): self._debug.write("atexit: {0!r}".format(self)) if self._started: self.stop() if self._auto_save: self.save() def erase(self): """Erase previously-collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self._init() self._post_init() if self._collector: self._collector.reset() self._init_data(suffix=None) self._data.erase(parallel=self.config.parallel) self._data = None def switch_context(self, new_context): """Switch to a new dynamic context. `new_context` is a string to use as the context label for collected data. If a :ref:`static context <static_contexts>` is in use, the static and dynamic context labels will be joined together with a pipe character. Coverage collection must be started already. .. versionadded:: 5.0 """ if not self._started: raise CoverageException( # pragma: only jython "Cannot switch context, coverage is not started" ) self._collector.switch_context(new_context) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. 
The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self): """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" data = self.get_data() data.write() def combine(self, data_paths=None, strict=False): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. `data_paths` is a list of files or directories from which data should be combined. If no list is passed, then the data files from the directory indicated by the current data file (probably the current directory) will be combined. If `strict` is true, then it is an error to attempt to combine when there are no data files to combine. .. versionadded:: 4.0 The `data_paths` parameter. .. versionadded:: 4.3 The `strict` parameter. """ self._init() self._init_data(suffix=None) self._post_init() self.get_data() aliases = None if self.config.paths: aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) combine_parallel_data(self._data, aliases=aliases, data_paths=data_paths, strict=strict) def get_data(self): """Get the collected data. Also warn about various problems collecting data. Returns a :class:`coverage.CoverageData`, the collected coverage data. .. versionadded:: 4.0 """ self._init() self._init_data(suffix=None) self._post_init() if self._collector and self._collector.flush_data(): self._post_save_work() return self._data def _post_save_work(self): """After saving data, look for warnings, post-work, etc. Warn about things that should have happened but didn't. Look for unexecuted files. """ # If there are still entries in the source_pkgs_unmatched list, # then we never encountered those packages. if self._warn_unimported_source: self._inorout.warn_unimported_source() # Find out if we got any data. if not self._data and self._warn_no_data: self._warn("No data was collected.", slug="no-data-collected") # Find files that were never executed at all. for file_path, plugin_name in self._inorout.find_unexecuted_files(): self._data.touch_file(file_path, plugin_name) if self.config.note: self._data.add_run_info(note=self.config.note) # Backward compatibility with version 1. def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2(self, morf): """Analyze a module. `morf` is a module or a file name. 
It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ # All reporting comes through here, so do reporting initialization. self._init() Numbers.set_precision(self.config.precision) self._post_init() data = self.get_data() if not isinstance(it, FileReporter): it = self._get_file_reporter(it) return Analysis(data, it) def _get_file_reporter(self, morf): """Get a FileReporter for a module or file name.""" plugin = None file_reporter = "python" if isinstance(morf, string_class): abs_morf = abs_file(morf) plugin_name = self._data.file_tracer(abs_morf) if plugin_name: plugin = self._plugins.get(plugin_name) if plugin: file_reporter = plugin.file_reporter(abs_morf) if file_reporter is None: raise CoverageException( "Plugin %r did not provide a file reporter for %r." % ( plugin._coverage_plugin_name, morf ) ) if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) return file_reporter def _get_file_reporters(self, morfs=None): """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return the list of FileReporters. If `morfs` is a single module or file name, this returns a list of one FileReporter. If `morfs` is empty or None, then the list of all files measured is used to find the FileReporters. """ if not morfs: morfs = self._data.measured_files() # Be sure we have a collection. if not isinstance(morfs, (list, tuple, set)): morfs = [morfs] file_reporters = [self._get_file_reporter(morf) for morf in morfs] return file_reporters def report( self, morfs=None, show_missing=None, ignore_errors=None, file=None, omit=None, include=None, skip_covered=None, ): """Write a textual summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. If `show_missing` is true, then details of which lines or branches are missing will be included in the report. If `ignore_errors` is true, then a failure while reporting a single file will not stop the entire report. `file` is a file-like object, suitable for writing. `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. If `skip_covered` is true, don't report on files with 100% coverage. All of the arguments default to the settings read from the :ref:`configuration file <config>`. Returns a float, the total percentage covered. """ self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) def annotate( self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, ): """Annotate a list of modules. Each module in `morfs` is annotated. 
The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See :meth:`report` for other arguments. """ self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory) def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None, skip_covered=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, html_dir=directory, extra_css=extra_css, html_title=title, skip_covered=skip_covered, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs) def xml_report( self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None, ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self.config.from_args( ignore_errors=ignore_errors, report_omit=omit, report_include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: # Ensure that the output directory is created; done here # because this report pre-opens the output file. # HTMLReport does this using the Report plumbing because # its task is more complex, being multiple files. 
ensure_dir_for_file(self.config.xml_output) open_kwargs = {} if env.PY3: open_kwargs['encoding'] = 'utf8' outfile = open(self.config.xml_output, "w", **open_kwargs) file_to_close = outfile try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod self._init() self._post_init() def plugin_info(plugins): """Make an entry for the sys_info from a list of plug-ins.""" entries = [] for plugin in plugins: entry = plugin._coverage_plugin_name if not plugin._coverage_enabled: entry += " (disabled)" entries.append(entry) return entries info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('tracer', self._collector.tracer_name() if self._collector else "-none-"), ('CTracer', 'available' if CTracer else "unavailable"), ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)), ('plugins.configurers', plugin_info(self._plugins.configurers)), ('configs_attempted', self.config.attempted_config_files), ('configs_read', self.config.config_files_read), ('config_file', self.config.config_file), ('config_contents', repr(self.config._config_contents) if self.config._config_contents else '-none-' ), ('data_file', self._data.filename if self._data else "-none-"), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', platform.python_implementation()), ('executable', sys.executable), ('def_encoding', sys.getdefaultencoding()), ('fs_encoding', sys.getfilesystemencoding()), ('pid', os.getpid()), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted( ("%s = %s" % (k, v)) for k, v in iitems(os.environ) if any(slug in k for slug in ("COV", "PY")) )), ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))), ] if self._inorout: info.extend(self._inorout.sys_info()) return info
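# --- Illustrative sketch, not part of coverage.py itself ---------------------
# Exercises features documented in the class above: get_option/set_option,
# the `context`/`check_preimported` constructor parameters, switch_context(),
# and combine(strict=True). "suite_setup" and "suite_teardown" are hypothetical
# callables standing in for phases of a test run.
def _example_contexts_and_options():
    from coverage import Coverage

    # data_suffix=True makes save() write a suffixed (parallel-style) data
    # file, so the combine() step below has something to merge.
    cov = Coverage(data_suffix=True, context="suite", check_preimported=True)

    # set_option/get_option mirror the [run] section of a configuration file.
    cov.set_option("run:branch", True)
    assert cov.get_option("run:branch") is True

    cov.start()
    suite_setup()                   # hypothetical: measured under the static "suite" label
    cov.switch_context("teardown")  # dynamic label, joined to the static one with a pipe
    suite_teardown()                # hypothetical
    cov.stop()
    cov.save()

    # Merge data files named "<data_file>.*" into the current measurements;
    # with strict=True it is an error if there is nothing to combine.
    cov.combine(strict=True)
    return cov.report()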