def find_branches(module_path):
    """Collect (from, to) line-number arcs for every function in a module.

    Arcs reported by the per-function parser are 1-based within the function
    source, so each pair is shifted by the function's starting line. Negative
    (synthetic entry/exit) arcs are dropped. Returns an empty set when the
    module is empty or any function fails to parse as Python.
    """
    collected = set()
    with open(module_path) as src_file:
        source = src_file.read()
    if not source:
        return set()
    for first_line, func_src in get_function_sources(module_path, source):
        func_parser = PythonParser(text=func_src)
        try:
            func_parser.parse_source()
            raw_arcs = func_parser.arcs()
        except coverage.misc.NotPython as exc:
            logger.error(exc)
            return set()
        # Re-base function-relative line numbers to module coordinates.
        shift = first_line - 1
        collected.update(
            (start + shift, end + shift)
            for start, end in raw_arcs
            if start >= 0 and end >= 0
        )
    return collected
def compare_alternatives(source):
    """Count how many distinct arc-sets the async alternatives of `source` produce.

    Alternatives whose parsers report identical arcs are grouped together;
    the return value is the number of groups.
    """
    grouped = collections.defaultdict(list)
    for index, variant in enumerate(async_alternatives(source)):
        variant_arcs = PythonParser(variant).arcs()
        grouped[tuple(variant_arcs)].append((index, variant))
    return len(grouped)
def parse_source(self, text):
    """Dedent and parse `text`, returning the `PythonParser` that did the work."""
    # On Python 2 the helper may receive bytes; decode before dedenting.
    source = text.decode("ascii") if env.PY2 else text
    p = PythonParser(text=textwrap.dedent(source), exclude="nocover")
    p.parse_source()
    return p
def parser(self):
    """Create the :class:`PythonParser` on first access and cache it."""
    if self._parser is not None:
        return self._parser
    fresh = PythonParser(
        filename=self.filename,
        exclude=self.coverage._exclude_regex('exclude'),
    )
    fresh.parse_source()
    self._parser = fresh
    return fresh
def one_file(self, options, filename):
    """Process just one file.

    `options` carries the command-line flags (dis, chunks, recursive,
    source, tokens, histogram); `filename` is the Python file to inspect.
    Prints disassembly, chunk/arc summaries, and/or an annotated source
    listing depending on the flags.
    """
    try:
        text = get_python_source(filename)
        bp = ByteParser(text, filename=filename)
    except Exception as err:
        print("%s" % (err,))
        return

    if options.dis:
        print("Main code:")
        self.disassemble(bp, histogram=options.histogram)

    arcs = bp._all_arcs()
    if options.chunks:
        chunks = bp._all_chunks()
        if options.recursive:
            print("%6d: %s" % (len(chunks), filename))
        else:
            print("Chunks: %r" % chunks)
            print("Arcs: %r" % sorted(arcs))

    if options.source or options.tokens:
        cp = PythonParser(filename=filename, exclude=r"no\s*cover")
        cp.show_tokens = options.tokens
        cp.parse_source()

        if options.source:
            arc_width = 0
            arc_chars = {}
            if options.chunks:
                arc_chars = self.arc_ascii_art(arcs)
                if arc_chars:
                    arc_width = max(len(a) for a in arc_chars.values())

            exit_counts = cp.exit_counts()

            for lineno, ltext in enumerate(cp.lines, start=1):
                # Marker columns: statement kind, exit count,
                # docstring/classdef, excluded, then arc ascii-art.
                m0 = m1 = m2 = m3 = a = ' '
                if lineno in cp.statements:
                    m0 = '='
                elif lineno in cp.raw_statements:
                    m0 = '-'
                exits = exit_counts.get(lineno, 0)
                if exits > 1:
                    m1 = str(exits)
                if lineno in cp.raw_docstrings:
                    m2 = '"'
                if lineno in cp.raw_classdefs:
                    m2 = 'C'
                if lineno in cp.raw_excluded:
                    m3 = 'x'
                # BUG FIX: when options.chunks is false, `arc_chars` stays a
                # plain empty dict, so `arc_chars[lineno]` raised KeyError for
                # every line. Use .get() with an empty default instead.
                a = arc_chars.get(lineno, '').ljust(arc_width)
                print("%4d %s%s%s%s%s %s" % (lineno, m0, m1, m2, m3, a, ltext))
class PantsPythonFileReporter(PythonFileReporter):
    """Report support for a Python file.

    At test time we run coverage in an environment where all source roots have been stripped from
    source files. When we read the coverage report, we would like to see the buildroot relative
    file names.

    This class handles mapping from the coverage data stored at test time, which references source
    root stripped sources to the actual file names we want to see in the reports. In order for
    this to work, the environment in which we run `coverage html` (to generate a report) must
    include all of the source files with their source roots still present.
    """

    def __init__(self, relative_source_root, source_root_stripped_filename, test_time,
                 coverage=None):
        # Note: Do not call `super()` on this class. The __init__ of the super class goes to a lot of
        # effort to manufacture absolute paths which will break things when the tests are run in one
        # chroot and the report generation in another.
        self.coverage = coverage
        # Buildroot-relative path: source root re-joined onto the stripped name.
        self.filename_with_source_root = os.path.join(
            relative_source_root, source_root_stripped_filename)
        # The stripped path is what the test-time coverage data refers to.
        self.filename = source_root_stripped_filename
        self.test_time = test_time

    def relative_filename(self):
        """Report the buildroot-relative name, not the stripped one."""
        return self.filename_with_source_root

    # Lazily-created parser cache (class-level default; set per instance below).
    _parser = None

    @property
    def parser(self):
        """Lazily create and cache a parser for whichever path exists now."""
        if self._parser is None:
            # At test time only the stripped path exists; at report time the
            # source roots are present, so use the re-rooted path.
            filename = self.filename if self.test_time else self.filename_with_source_root
            self._parser = PythonParser(filename=filename)
            self._parser.parse_source()
        return self._parser

    def no_branch_lines(self):
        """Lines matching the default partial-branch pragmas get no branch coverage."""
        return self.parser.lines_matching(
            join_regex(DEFAULT_PARTIAL[:]),
            join_regex(DEFAULT_PARTIAL_ALWAYS[:]),
        )

    # Lazily-read source cache (class-level default; set per instance below).
    _source = None

    def source(self):
        """Read (once) and return the file's source text from the re-rooted path."""
        if self._source is None:
            self._source = get_python_source(self.filename_with_source_root)
        return self._source
def get_parser(self, exclude=None):
    """Build a :class:`PythonParser` for this reporter's file.

    The parser is returned unparsed; callers invoke ``parse_source`` themselves.
    """
    resolved_name, file_source = self._find_source(self.filename)
    parser = PythonParser(text=file_source, filename=resolved_name, exclude=exclude)
    return parser
def parser(self):
    """Return the cached :class:`PythonParser`, creating it on first access.

    NOTE(review): unlike sibling variants, this does not call ``parse_source()``
    here — callers appear to be expected to trigger parsing themselves.
    """
    if self._parser is not None:
        return self._parser
    self._parser = PythonParser(
        filename=self.filename,
        exclude=self.coverage._exclude_regex('exclude'),
    )
    return self._parser
class SimpleFileReporter(PythonFileReporter):
    """A `PythonFileReporter` that reports a caller-supplied relative path."""

    def __init__(self, morf, relpath):
        super(SimpleFileReporter, self).__init__(morf, coverage=None)
        self._relpath = relpath

    def relative_filename(self):
        return self._relpath

    # TODO(John Sirois): Kill the workaround overrides below if there is a useable upstream
    # resolution to:
    #   https://bitbucket.org/ned/coveragepy/issues/646/modifying-coverage-reporting-for-python

    @property
    def parser(self):
        """Parse lazily, without needing a Coverage instance for excludes."""
        if self._parser is not None:
            return self._parser
        fresh = PythonParser(filename=self.filename)
        fresh.parse_source()
        self._parser = fresh
        return fresh

    def no_branch_lines(self):
        """Lines matching the default partial-branch pragmas get no branch coverage."""
        partial = join_regex(DEFAULT_PARTIAL[:])
        partial_always = join_regex(DEFAULT_PARTIAL_ALWAYS[:])
        return self.parser.lines_matching(partial, partial_always)
def _extract_all_branches(python_source: str):
    """Return every arc (branch) that coverage.py finds in `python_source`."""
    line_count = len(python_source.splitlines()) + 1
    parser = PythonParser(text=python_source)
    # Hack: force-disable Python's optimizer. CPython otherwise folds away
    # statements like "if False: ..." — even with compile(..., optimize=0) —
    # which would cause unexpected diffs in the coverage branches.
    parser._byte_parser = _FakeByteParser(num_lines=line_count)
    parser.parse_source()
    return parser.arcs()
def one_file(self, options, filename):
    """Process just one file.

    `options` carries the command-line flags (dis, chunks, recursive,
    source, tokens, histogram); `filename` is the Python file to inspect.
    Prints disassembly, chunk/arc summaries, and/or an annotated source
    listing depending on the flags.
    """
    try:
        text = get_python_source(filename)
        bp = ByteParser(text, filename=filename)
    except Exception as err:
        print("%s" % (err, ))
        return

    if options.dis:
        print("Main code:")
        self.disassemble(bp, histogram=options.histogram)

    arcs = bp._all_arcs()
    if options.chunks:
        chunks = bp._all_chunks()
        if options.recursive:
            print("%6d: %s" % (len(chunks), filename))
        else:
            print("Chunks: %r" % chunks)
            print("Arcs: %r" % sorted(arcs))

    if options.source or options.tokens:
        cp = PythonParser(filename=filename, exclude=r"no\s*cover")
        cp.show_tokens = options.tokens
        cp.parse_source()

        if options.source:
            arc_width = 0
            arc_chars = {}
            if options.chunks:
                arc_chars = self.arc_ascii_art(arcs)
                if arc_chars:
                    arc_width = max(len(a) for a in arc_chars.values())

            exit_counts = cp.exit_counts()

            for lineno, ltext in enumerate(cp.lines, start=1):
                # Marker columns: statement kind, exit count,
                # docstring/classdef, excluded, then arc ascii-art.
                m0 = m1 = m2 = m3 = a = ' '
                if lineno in cp.statements:
                    m0 = '='
                elif lineno in cp.raw_statements:
                    m0 = '-'
                exits = exit_counts.get(lineno, 0)
                if exits > 1:
                    m1 = str(exits)
                if lineno in cp.raw_docstrings:
                    m2 = '"'
                if lineno in cp.raw_classdefs:
                    m2 = 'C'
                if lineno in cp.raw_excluded:
                    m3 = 'x'
                # BUG FIX: when options.chunks is false, `arc_chars` stays a
                # plain empty dict, so `arc_chars[lineno]` raised KeyError for
                # every line. Use .get() with an empty default instead.
                a = arc_chars.get(lineno, '').ljust(arc_width)
                print("%4d %s%s%s%s%s %s" % (lineno, m0, m1, m2, m3, a, ltext))
def check_human_coverage(self, text, lines=None, fingerprints=None):
    """Run `text` and assert the human-coverage result equals `lines`.

    When `fingerprints` is given, also assert the fingerprints computed from
    the module's blocks against it.
    """
    text = textwrap.dedent(text)
    covered_lines, _ = self.write_and_run(text)
    module = Module(source_code=text)

    parser = PythonParser(text=text)
    parser.parse_source()
    all_statements = parser.statements
    executed = parser.translate_lines(covered_lines)

    missing = all_statements - executed
    hc = sorted(human_coverage(text, sorted(all_statements), sorted(missing)))
    assert hc == lines

    if fingerprints:
        actual = create_fingerprints(module.lines, module.special_blocks, lines)
        assert actual == fingerprints
def parse_file(self, filename):
    """Parse the source in `filename`, record its statements/excluded lines,
    and return the `PythonParser` used."""
    # pylint: disable=attribute-defined-outside-init
    file_parser = PythonParser(filename=filename, exclude="nocover")
    statements, excluded = file_parser.parse_source()
    self.statements = statements
    self.excluded = excluded
    return file_parser
def parser(self):
    """Return a cached, already-parsed :class:`PythonParser` for this file."""
    if self._parser is not None:
        return self._parser
    built = PythonParser(filename=self.filename)
    built.parse_source()
    self._parser = built
    return built
def parse_text(self, source):
    """Dedent and parse Python `source`; return the parser object."""
    p = PythonParser(textwrap.dedent(source))
    p.parse_source()
    return p
def parse_file(self, filename):
    """Parse the source in `filename` and return the `PythonParser` used."""
    file_parser = PythonParser(filename=filename, exclude="nocover")
    file_parser.parse_source()
    return file_parser
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        # `morf` is a module or a file name; `coverage` is the owning
        # Coverage instance (may be None, e.g. in tests).
        self.coverage = coverage

        filename = source_for_morf(morf)

        super().__init__(canonical_filename(filename))

        if hasattr(morf, '__name__'):
            # A module object: derive a path-style display name from its
            # dotted name.
            name = morf.__name__.replace(".", os.sep)
            if os.path.basename(filename).startswith('__init__.'):
                # Packages display as "pkg/__init__.py", not "pkg.py".
                name += os.sep + "__init__"
            name += ".py"
        else:
            name = relative_filename(filename)
        self.relname = name

        # Lazily populated caches.
        self._source = None
        self._parser = None
        self._excluded = None

    def __repr__(self):
        return f"<PythonFileReporter {self.filename!r}>"

    @contract(returns='unicode')
    def relative_filename(self):
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        # Delegate line translation (e.g. multi-line statements) to the parser.
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        """Lines matching the configured partial-branch pragmas."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list))
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        # Read and cache the source text on first use.
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        return source_token_lines(self.source())
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        # `morf` is a module or a file name; `coverage` is the owning
        # Coverage instance (may be None, e.g. in tests).
        self.coverage = coverage

        if hasattr(morf, '__file__'):
            filename = morf.__file__
        elif isinstance(morf, types.ModuleType):
            # A module should have had .__file__, otherwise we can't use it.
            # This could be a PEP-420 namespace package.
            raise CoverageException("Module {0} has no file".format(morf))
        else:
            filename = morf

        filename = files.unicode_filename(filename)

        # .pyc files should always refer to a .py instead.
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        elif filename.endswith('$py.class'):  # Jython
            filename = filename[:-9] + ".py"

        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))

        if hasattr(morf, '__name__'):
            # A module object: derive a path-style display name from its
            # dotted name.
            name = morf.__name__
            name = name.replace(".", os.sep) + ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name

        # Lazily populated caches.
        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None

    @contract(returns='unicode')
    def relative_filename(self):
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        # Delegate line translation (e.g. multi-line statements) to the parser.
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        """Lines matching the configured partial-branch pragmas."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list))
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        # Read and cache the source text on first use.
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        return source_token_lines(self.source())
def parse_text(self, source):
    """Dedent and parse Python `source`; return the parser object."""
    dedented = textwrap.dedent(source)
    p = PythonParser(text=dedented)
    p.parse_source()
    return p
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        # `morf` is a module or a file name; `coverage` is the owning
        # Coverage instance (may be None, e.g. in tests).
        self.coverage = coverage

        if hasattr(morf, '__file__'):
            filename = morf.__file__
        elif isinstance(morf, types.ModuleType):
            # A module should have had .__file__, otherwise we can't use it.
            # This could be a PEP-420 namespace package.
            raise CoverageException("Module {0} has no file".format(morf))
        else:
            filename = morf

        filename = files.unicode_filename(filename)

        # .pyc files should always refer to a .py instead.
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        elif filename.endswith('$py.class'):  # Jython
            filename = filename[:-9] + ".py"

        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))

        if hasattr(morf, '__name__'):
            # A module object: derive a path-style display name from its
            # dotted name.
            name = morf.__name__
            name = name.replace(".", os.sep) + ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name

        # Lazily populated caches.
        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None

    @contract(returns='unicode')
    def relative_filename(self):
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        # Delegate line translation (e.g. multi-line statements) to the parser.
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        """Lines matching the configured partial-branch pragmas."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list)
        )
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        # Read and cache the source text on first use.
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        return source_token_lines(self.source())
def one_file(self, options, filename): """Process just one file.""" # `filename` can have a line number suffix. In that case, extract those # lines, dedent them, and use that. This is for trying test cases # embedded in the test files. match = re.search(r"^(.*):(\d+)-(\d+)$", filename) if match: filename, start, end = match.groups() start, end = int(start), int(end) else: start = end = None try: text = get_python_source(filename) if start is not None: lines = text.splitlines(True) text = textwrap.dedent("".join(lines[start - 1:end]).replace( "\\\\", "\\")) pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover") pyparser.parse_source() except Exception as err: print(f"{err}") return if options.dis: print("Main code:") self.disassemble(pyparser.byte_parser, histogram=options.histogram) arcs = pyparser.arcs() if options.source or options.tokens: pyparser.show_tokens = options.tokens pyparser.parse_source() if options.source: arc_chars = self.arc_ascii_art(arcs) if arc_chars: arc_width = max(len(a) for a in arc_chars.values()) exit_counts = pyparser.exit_counts() for lineno, ltext in enumerate(pyparser.lines, start=1): marks = [' ', ' ', ' ', ' ', ' '] a = ' ' if lineno in pyparser.raw_statements: marks[0] = '-' if lineno in pyparser.statements: marks[1] = '=' exits = exit_counts.get(lineno, 0) if exits > 1: marks[2] = str(exits) if lineno in pyparser.raw_docstrings: marks[3] = '"' if lineno in pyparser.raw_classdefs: marks[3] = 'C' if lineno in pyparser.raw_excluded: marks[4] = 'x' if arc_chars: a = arc_chars[lineno].ljust(arc_width) else: a = "" print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))
"""Parse every Python file in a tree.""" import os import sys from coverage.misc import CoverageException from coverage.parser import PythonParser for root, dirnames, filenames in os.walk(sys.argv[1]): for filename in filenames: if filename.endswith(".py"): filename = os.path.join(root, filename) print(":: {}".format(filename)) try: par = PythonParser(filename=filename) par.parse_source() par.arcs() except Exception as exc: print(" ** {}".format(exc))
def parse_source(self, text):
    """Dedent and parse `text`; return the `PythonParser` that parsed it."""
    dedented = textwrap.dedent(text)
    p = PythonParser(text=dedented, exclude="nocover")
    p.parse_source()
    return p
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        # `morf` is a module or a file name; `coverage` is the owning
        # Coverage instance (may be None, e.g. in tests).
        self.coverage = coverage

        filename = source_for_morf(morf)

        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))

        if hasattr(morf, '__name__'):
            # A module object: derive a path-style display name from its
            # dotted name.
            name = morf.__name__.replace(".", os.sep)
            if os.path.basename(filename).startswith('__init__.'):
                # Packages display as "pkg/__init__.py", not "pkg.py".
                name += os.sep + "__init__"
            name += ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name

        # Lazily populated caches.
        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None

    def __repr__(self):
        return "<PythonFileReporter {0!r}>".format(self.filename)

    @contract(returns='unicode')
    def relative_filename(self):
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        # Delegate line translation (e.g. multi-line statements) to the parser.
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        """Lines matching the configured partial-branch pragmas."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list)
        )
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        # Read and cache the source text on first use.
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        return source_token_lines(self.source())
def one_file(self, options, filename): """Process just one file.""" # `filename` can have a line number suffix. In that case, extract those # lines, dedent them, and use that. This is for trying test cases # embedded in the test files. match = re.search(r"^(.*):(\d+)-(\d+)$", filename) if match: filename, start, end = match.groups() start, end = int(start), int(end) else: start = end = None try: text = get_python_source(filename) if start is not None: lines = text.splitlines(True) text = textwrap.dedent("".join(lines[start-1:end]).replace("\\\\", "\\")) pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover") pyparser.parse_source() except Exception as err: print("%s" % (err,)) return if options.dis: print("Main code:") self.disassemble(pyparser.byte_parser, histogram=options.histogram) arcs = pyparser.arcs() if options.source or options.tokens: pyparser.show_tokens = options.tokens pyparser.parse_source() if options.source: arc_chars = self.arc_ascii_art(arcs) if arc_chars: arc_width = max(len(a) for a in arc_chars.values()) exit_counts = pyparser.exit_counts() for lineno, ltext in enumerate(pyparser.lines, start=1): marks = [' ', ' ', ' ', ' ', ' '] a = ' ' if lineno in pyparser.raw_statements: marks[0] = '-' if lineno in pyparser.statements: marks[1] = '=' exits = exit_counts.get(lineno, 0) if exits > 1: marks[2] = str(exits) if lineno in pyparser.raw_docstrings: marks[3] = '"' if lineno in pyparser.raw_classdefs: marks[3] = 'C' if lineno in pyparser.raw_excluded: marks[4] = 'x' if arc_chars: a = arc_chars[lineno].ljust(arc_width) else: a = "" print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))
def _get_file_lines(file: str) -> Set[int]:
    """Return the raw statement line numbers that coverage.py sees in `file`."""
    file_parser = PythonParser(filename=file)
    file_parser.parse_source()
    return file_parser.raw_statements
def parser(self):
    """Lazily build and cache the parser for whichever path exists now."""
    if self._parser is not None:
        return self._parser
    # At test time only the source-root-stripped path exists; at report time
    # the source roots are present, so use the re-rooted path.
    target = self.filename if self.test_time else self.filename_with_source_root
    fresh = PythonParser(filename=target)
    fresh.parse_source()
    self._parser = fresh
    return self._parser