Example #1
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read.  Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource(
                    "No source for code: '%s': %s" % (self.filename, err)
                )

        self.exclude = exclude

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The normalized line numbers of the statements in the code. Exclusions
        # are taken into account, and statements are adjusted to their first
        # lines.
        self.statements = set()

        # The normalized line numbers of the excluded lines in the code,
        # adjusted to their first lines.
        self.excluded = set()

        # The raw_* attributes are only used in this class, and in
        # lab/parser.py to show how this class is working.

        # The line numbers that start statements, as reported by the line
        # number table in the bytecode.
        self.raw_statements = set()

        # The raw line numbers of excluded lines of code, as marked by pragmas.
        self.raw_excluded = set()

        # The line numbers of class and function definitions.
        self.raw_classdefs = set()

        # The line numbers of docstring lines.
        self.raw_docstrings = set()

        # Internal detail, used by lab/parser.py.
        self.show_tokens = False

        # A dict mapping line numbers to lexical statement starts for
        # multi-line statements.
        self._multiline = {}

        # Lazily-created ByteParser, arc data, and missing arc descriptions.
        self._byte_parser = None
        self._all_arcs = None
        self._missing_arc_fragments = None
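
A minimal usage sketch of the constructor above, assuming PythonParser is importable from coverage.parser; the sample source string is made up, and the exclude regex is the same no\s*cover pattern the other examples on this page pass.

from coverage.parser import PythonParser

# Made-up source text: line 2 carries a "no cover" marker.
src = (
    "a = 1\n"
    "if a:  # no cover\n"
    "    b = 2\n"
)
parser = PythonParser(text=src, exclude=r"no\s*cover")
parser.parse_source()
print(sorted(parser.statements))  # normalized statement line numbers
print(sorted(parser.excluded))    # lines matched by the exclude regex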
Example #2
def read_file_with_checksum(absfilename):
    try:
        source = get_python_source(absfilename)
    except NoSource:
        return None, None
    hasher = hashlib.sha1()
    hasher.update(source.encode("utf-8"))
    return source, hasher.hexdigest()
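
A hedged illustration of calling the helper above; it assumes read_file_with_checksum and its imports (hashlib, get_python_source, NoSource) are in scope as in the snippet, and the path is only a placeholder.

source, digest = read_file_with_checksum("/tmp/some_module.py")
if source is None:
    # get_python_source raised NoSource, so there is nothing to hash.
    print("no source available")
else:
    print(digest)  # SHA-1 hex digest of the UTF-8 encoded source text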
Example #3
    def one_file(self, options, filename):
        """Process just one file."""

        try:
            text = get_python_source(filename)
            bp = ByteParser(text, filename=filename)
        except Exception as err:
            print("%s" % (err, ))
            return

        if options.dis:
            print("Main code:")
            self.disassemble(bp, histogram=options.histogram)

        arcs = bp._all_arcs()
        if options.chunks:
            chunks = bp._all_chunks()
            if options.recursive:
                print("%6d: %s" % (len(chunks), filename))
            else:
                print("Chunks: %r" % chunks)
                print("Arcs: %r" % sorted(arcs))

        if options.source or options.tokens:
            cp = PythonParser(filename=filename, exclude=r"no\s*cover")
            cp.show_tokens = options.tokens
            cp.parse_source()

            if options.source:
                arc_width = 0
                arc_chars = {}
                if options.chunks:
                    arc_chars = self.arc_ascii_art(arcs)
                    if arc_chars:
                        arc_width = max(len(a) for a in arc_chars.values())

                exit_counts = cp.exit_counts()

                for lineno, ltext in enumerate(cp.lines, start=1):
                    m0 = m1 = m2 = m3 = a = ' '
                    if lineno in cp.statements:
                        m0 = '='
                    elif lineno in cp.raw_statements:
                        m0 = '-'
                    exits = exit_counts.get(lineno, 0)
                    if exits > 1:
                        m1 = str(exits)
                    if lineno in cp.raw_docstrings:
                        m2 = '"'
                    if lineno in cp.raw_classdefs:
                        m2 = 'C'
                    if lineno in cp.raw_excluded:
                        m3 = 'x'
                    a = arc_chars[lineno].ljust(arc_width)
                    print("%4d %s%s%s%s%s %s" %
                          (lineno, m0, m1, m2, m3, a, ltext))
Example #4
def make_code_from_py(filename):
    """Get source from `filename` and make a code object of it."""
    # Open the source file.
    try:
        source = get_python_source(filename)
    except (IOError, NoSource):
        raise NoSource("No file to run: '%s'" % filename)

    code = compile_unicode(source, filename, "exec")
    return code
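
An assumed follow-up showing what a caller might do with the returned code object; exec'ing it with a fresh globals dict is only an illustration, and the file name is hypothetical.

code = make_code_from_py("target_script.py")
exec(code, {"__name__": "__main__", "__file__": "target_script.py"})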
Example #5
    def one_file(self, options, filename):
        """Process just one file."""

        try:
            text = get_python_source(filename)
            bp = ByteParser(text, filename=filename)
        except Exception as err:
            print("%s" % (err,))
            return

        if options.dis:
            print("Main code:")
            self.disassemble(bp, histogram=options.histogram)

        arcs = bp._all_arcs()
        if options.chunks:
            chunks = bp._all_chunks()
            if options.recursive:
                print("%6d: %s" % (len(chunks), filename))
            else:
                print("Chunks: %r" % chunks)
                print("Arcs: %r" % sorted(arcs))

        if options.source or options.tokens:
            cp = PythonParser(filename=filename, exclude=r"no\s*cover")
            cp.show_tokens = options.tokens
            cp.parse_source()

            if options.source:
                arc_width = 0
                arc_chars = {}
                if options.chunks:
                    arc_chars = self.arc_ascii_art(arcs)
                    if arc_chars:
                        arc_width = max(len(a) for a in arc_chars.values())

                exit_counts = cp.exit_counts()

                for lineno, ltext in enumerate(cp.lines, start=1):
                    m0 = m1 = m2 = m3 = a = ' '
                    if lineno in cp.statements:
                        m0 = '='
                    elif lineno in cp.raw_statements:
                        m0 = '-'
                    exits = exit_counts.get(lineno, 0)
                    if exits > 1:
                        m1 = str(exits)
                    if lineno in cp.raw_docstrings:
                        m2 = '"'
                    if lineno in cp.raw_classdefs:
                        m2 = 'C'
                    if lineno in cp.raw_excluded:
                        m3 = 'x'
                    a = arc_chars[lineno].ljust(arc_width)
                    print("%4d %s%s%s%s%s %s" % (lineno, m0, m1, m2, m3, a, ltext))
Example #6
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read.  Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource(
                    "No source for code: '%s': %s" % (self.filename, err)
                    )

        if self.text:
            assert isinstance(self.text, str)
            # Scrap the BOM if it exists.
            # (Used to do this, but no longer.  Not sure what bad will happen
            # if we don't do it.)
            #   if ord(self.text[0]) == 0xfeff:
            #       self.text = self.text[1:]

        self.exclude = exclude

        self.show_tokens = False

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The line numbers of excluded lines of code.
        self.excluded = set()

        # The line numbers of docstring lines.
        self.docstrings = set()

        # The line numbers of class definitions.
        self.classdefs = set()

        # A dict mapping line numbers to (lo,hi) for multi-line statements.
        self.multiline = {}

        # The line numbers that start statements.
        self.statement_starts = set()

        # Lazily-created ByteParser
        self._byte_parser = None
Example #7
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read.  Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource("No source for code: '%s': %s" %
                               (self.filename, err))

        if self.text:
            assert isinstance(self.text, str)
            # Scrap the BOM if it exists.
            # (Used to do this, but no longer.  Not sure what bad will happen
            # if we don't do it.)
            #   if ord(self.text[0]) == 0xfeff:
            #       self.text = self.text[1:]

        self.exclude = exclude

        self.show_tokens = False

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The line numbers of excluded lines of code.
        self.excluded = set()

        # The line numbers of docstring lines.
        self.docstrings = set()

        # The line numbers of class definitions.
        self.classdefs = set()

        # A dict mapping line numbers to (lo,hi) for multi-line statements.
        self.multiline = {}

        # The line numbers that start statements.
        self.statement_starts = set()

        # Lazily-created ByteParser
        self._byte_parser = None
Example #8
File: parser.py  Project: LadanP/coveragepy
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read.  Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource(
                    "No source for code: '%s': %s" % (self.filename, err)
                )

        self.exclude = exclude

        self.show_tokens = False

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The line numbers of excluded lines of code.
        self.excluded = set()

        # The line numbers of docstring lines.
        self.docstrings = set()

        # The line numbers of class definitions.
        self.classdefs = set()

        # A dict mapping line numbers to (lo,hi) for multi-line statements.
        self.multiline = {}

        # The line numbers that start statements.
        self.statement_starts = set()

        # Lazily-created ByteParser and arc data.
        self._byte_parser = None
        self._all_arcs = None
Example #9
File: plugin.py  Project: wisechengyi/pants
 def source(self):
     if self._source is None:
         self._source = get_python_source(self.filename_with_source_root)
     return self._source
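
The property above lazily caches the file's text. Below is a self-contained sketch of the same pattern with a hypothetical class name and constructor; only the property body mirrors the pants plugin snippet.

from coverage.python import get_python_source

class CachedSource:
    """Hypothetical wrapper that reads a source file once and caches it."""

    def __init__(self, filename_with_source_root):
        self.filename_with_source_root = filename_with_source_root
        self._source = None

    @property
    def source(self):
        # Read the text on first access, then reuse the cached value.
        if self._source is None:
            self._source = get_python_source(self.filename_with_source_root)
        return self._source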
Example #10
 def check_file_tokenization(self, fname):
     """Use the contents of `fname` for `check_tokenization`."""
     self.check_tokenization(get_python_source(fname))
Example #11
    def one_file(self, options, filename):
        """Process just one file."""
        # `filename` can have a line number suffix. In that case, extract those
        # lines, dedent them, and use that.  This is for trying test cases
        # embedded in the test files.
        match = re.search(r"^(.*):(\d+)-(\d+)$", filename)
        if match:
            filename, start, end = match.groups()
            start, end = int(start), int(end)
        else:
            start = end = None

        try:
            text = get_python_source(filename)
            if start is not None:
                lines = text.splitlines(True)
                text = textwrap.dedent("".join(lines[start - 1:end]).replace(
                    "\\\\", "\\"))
            pyparser = PythonParser(text,
                                    filename=filename,
                                    exclude=r"no\s*cover")
            pyparser.parse_source()
        except Exception as err:
            print(f"{err}")
            return

        if options.dis:
            print("Main code:")
            self.disassemble(pyparser.byte_parser, histogram=options.histogram)

        arcs = pyparser.arcs()

        if options.source or options.tokens:
            pyparser.show_tokens = options.tokens
            pyparser.parse_source()

            if options.source:
                arc_chars = self.arc_ascii_art(arcs)
                if arc_chars:
                    arc_width = max(len(a) for a in arc_chars.values())

                exit_counts = pyparser.exit_counts()

                for lineno, ltext in enumerate(pyparser.lines, start=1):
                    marks = [' ', ' ', ' ', ' ', ' ']
                    a = ' '
                    if lineno in pyparser.raw_statements:
                        marks[0] = '-'
                    if lineno in pyparser.statements:
                        marks[1] = '='
                    exits = exit_counts.get(lineno, 0)
                    if exits > 1:
                        marks[2] = str(exits)
                    if lineno in pyparser.raw_docstrings:
                        marks[3] = '"'
                    if lineno in pyparser.raw_classdefs:
                        marks[3] = 'C'
                    if lineno in pyparser.raw_excluded:
                        marks[4] = 'x'

                    if arc_chars:
                        a = arc_chars[lineno].ljust(arc_width)
                    else:
                        a = ""

                    print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))
Example #12
File: parser.py  Project: d-b-w/coveragepy
    def one_file(self, options, filename):
        """Process just one file."""
        # `filename` can have a line number suffix. In that case, extract those
        # lines, dedent them, and use that.  This is for trying test cases
        # embedded in the test files.
        match = re.search(r"^(.*):(\d+)-(\d+)$", filename)
        if match:
            filename, start, end = match.groups()
            start, end = int(start), int(end)
        else:
            start = end = None

        try:
            text = get_python_source(filename)
            if start is not None:
                lines = text.splitlines(True)
                text = textwrap.dedent("".join(lines[start-1:end]).replace("\\\\", "\\"))
            pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover")
            pyparser.parse_source()
        except Exception as err:
            print("%s" % (err,))
            return

        if options.dis:
            print("Main code:")
            self.disassemble(pyparser.byte_parser, histogram=options.histogram)

        arcs = pyparser.arcs()

        if options.source or options.tokens:
            pyparser.show_tokens = options.tokens
            pyparser.parse_source()

            if options.source:
                arc_chars = self.arc_ascii_art(arcs)
                if arc_chars:
                    arc_width = max(len(a) for a in arc_chars.values())

                exit_counts = pyparser.exit_counts()

                for lineno, ltext in enumerate(pyparser.lines, start=1):
                    marks = [' ', ' ', ' ', ' ', ' ']
                    a = ' '
                    if lineno in pyparser.raw_statements:
                        marks[0] = '-'
                    if lineno in pyparser.statements:
                        marks[1] = '='
                    exits = exit_counts.get(lineno, 0)
                    if exits > 1:
                        marks[2] = str(exits)
                    if lineno in pyparser.raw_docstrings:
                        marks[3] = '"'
                    if lineno in pyparser.raw_classdefs:
                        marks[3] = 'C'
                    if lineno in pyparser.raw_excluded:
                        marks[4] = 'x'

                    if arc_chars:
                        a = arc_chars[lineno].ljust(arc_width)
                    else:
                        a = ""

                    print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))