Exemplo n.º 1
0
def render_report(output_path, reporter, morfs):
    """Run the provided reporter ensuring any required setup and cleanup is done

    At a high level this method ensures the output file is ready to be written
    to, writes the report to it, then closes the file and deletes any garbage
    created if necessary.

    `output_path` is the file to write to; "-" (or a falsy value) writes to
    stdout.  `reporter` is the report object whose ``report`` method is run
    over `morfs`.  Returns whatever ``reporter.report`` returns.
    """
    file_to_close = None
    delete_file = False
    # Bug fix: the original left `outfile` unbound when `output_path` was
    # falsy, raising NameError at the `reporter.report` call below.  Treat a
    # missing path the same as "-": write to stdout.
    if not output_path or output_path == '-':
        outfile = sys.stdout
    else:
        # Ensure that the output directory is created; done here
        # because this report pre-opens the output file.
        # HTMLReport does this using the Report plumbing because
        # its task is more complex, being multiple files.
        ensure_dir_for_file(output_path)
        open_kwargs = {}
        if env.PY3:
            open_kwargs['encoding'] = 'utf8'
        outfile = open(output_path, "w", **open_kwargs)
        file_to_close = outfile
    try:
        return reporter.report(morfs, outfile=outfile)
    except CoverageException:
        # A partially-written report file is useless; remember to remove it.
        delete_file = True
        raise
    finally:
        if file_to_close:
            file_to_close.close()
            if delete_file:
                file_be_gone(output_path)
Exemplo n.º 2
0
    def combine_parallel_data(self, data, aliases=None, data_paths=None, strict=False):
        """Merge parallel-mode data files into `data`.

        `self.filename` is treated as a prefix: every file named like that
        prefix plus a dot is read and folded into `data`.

        `aliases` is an optional `PathAliases` object used to translate file
        paths recorded on other machines into local ones.

        `data_paths` optionally names the files and/or directories to search;
        directories are scanned for files matching the prefix-plus-dot
        pattern.  When omitted, the directory holding `self.filename` is
        searched.

        Each successfully combined file is deleted afterwards.  A file that
        cannot be read produces a warning and is left in place.

        When `strict` is true and nothing is found to combine, an error is
        raised.

        """
        # data_dir can't be empty: the constructor ran os.path.abspath.
        data_dir, local = os.path.split(self.filename)
        pattern_name = local + '.*'

        search_paths = data_paths or [data_dir]
        candidates = []
        for path in search_paths:
            if os.path.isfile(path):
                candidates.append(os.path.abspath(path))
            elif os.path.isdir(path):
                candidates.extend(
                    glob.glob(os.path.join(os.path.abspath(path), pattern_name))
                )
            else:
                raise CoverageException(
                    "Couldn't combine from non-existent path '%s'" % (path, ))

        if strict and not candidates:
            raise CoverageException("No data to combine")

        for filename in candidates:
            partial = CoverageData()
            try:
                partial.read_file(filename)
            except CoverageException as exc:
                if self.warn:
                    # The exception message already names the file, so it
                    # serves directly as the warning text.
                    self.warn(str(exc))
            else:
                data.update(partial, aliases=aliases)
                file_be_gone(filename)
Exemplo n.º 3
0
    def erase(self, parallel=False):
        """Erase the data in this object.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        # Drop all in-memory measurement state, then re-check invariants.
        self._lines = None
        self._arcs = None
        self._file_tracers = {}
        self._runs = []
        self._validate()

        if self._debug.should('dataio'):
            self._debug.write("Erasing data file %r" % (self.filename, ))
        file_be_gone(self.filename)
        if not parallel:
            return
        # Also remove every file named <basename>.<suffix>.
        data_dir, local = os.path.split(self.filename)
        pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
        for parallel_file in glob.glob(pattern):
            if self._debug.should('dataio'):
                self._debug.write(
                    "Erasing parallel data file %r" % (parallel_file, ))
            file_be_gone(parallel_file)
Exemplo n.º 4
0
 def erase(self):
     """Erase the data, both in this object, and from its file storage."""
     # Remove the persisted file first (when file storage is in use and a
     # filename is set), then reset the in-memory line and arc data.
     if self.use_file and self.filename:
         file_be_gone(self.filename)
     self.lines = {}
     self.arcs = {}
Exemplo n.º 5
0
 def xml_report(self,
                morfs=None,
                outfile=None,
                ignore_errors=None,
                omit=None,
                include=None):
     """Produce a Cobertura-style XML coverage report.

     `outfile` is the path to write to; "-" writes to stdout.  Returns the
     total percentage covered, as reported by `XmlReporter`.
     """
     self._harvest_data()
     self.config.from_args(ignore_errors=ignore_errors,
                           omit=omit,
                           include=include,
                           xml_output=outfile)
     file_to_close = None
     delete_file = False
     xml_output = self.config.xml_output
     if xml_output == '-':
         outfile = sys.stdout
     elif xml_output:
         outfile = open(xml_output, 'w')
         file_to_close = outfile
     try:
         reporter = XmlReporter(self, self.config)
         return reporter.report(morfs, outfile=outfile)
     except CoverageException:
         # Don't leave a half-written report behind.
         delete_file = True
         raise
     finally:
         if file_to_close:
             file_to_close.close()
             if delete_file:
                 file_be_gone(self.config.xml_output)
Exemplo n.º 6
0
    def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
        )
        file_to_close = None
        delete_file = False
        xml_output = self.config.xml_output
        if xml_output:
            if xml_output == "-":
                outfile = sys.stdout
            else:
                outfile = open(xml_output, "w")
                file_to_close = outfile
        try:
            # A single try/except/finally replaces the original nested
            # try blocks; the semantics are identical.
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)
Exemplo n.º 7
0
 def erase(self):
     """Erase the data, both in this object, and from its file storage."""
     if self.use_file:
         filename = self.filename
         if filename:
             # Delete the backing file before clearing memory.
             file_be_gone(filename)
     # Start over with empty measurement maps.
     self.lines = {}
     self.arcs = {}
Exemplo n.º 8
0
def render_report(output_path, reporter, morfs):
    """Run a report generator, managing the output file.

    This function ensures the output file is ready to be written to. Then writes
    the report to it. Then closes the file and cleans up.

    """
    file_to_close = None
    delete_file = False

    if output_path != "-":
        # Ensure that the output directory is created; done here
        # because this report pre-opens the output file.
        # HTMLReport does this using the Report plumbing because
        # its task is more complex, being multiple files.
        ensure_dir_for_file(output_path)
        open_kwargs = {"encoding": "utf8"} if env.PY3 else {}
        outfile = open(output_path, "w", **open_kwargs)
        file_to_close = outfile
    else:
        outfile = sys.stdout

    try:
        return reporter.report(morfs, outfile=outfile)
    except CoverageException:
        delete_file = True
        raise
    finally:
        if file_to_close:
            file_to_close.close()
            if delete_file:
                file_be_gone(output_path)  # pragma: part covered (doesn't return)
Exemplo n.º 9
0
    def combine_parallel_data(self, data, aliases=None, data_paths=None, strict=False):
        """Fold parallel-mode data files into `data`.

        `self.filename` acts as a file prefix: every data file named with that
        prefix plus a dot is combined into `data`.

        `aliases`, when given, is a `PathAliases` object used to re-map
        recorded paths onto the local machine's layout.

        `data_paths`, when given, lists the files and/or directories to
        combine; directories are scanned for files matching the
        prefix-plus-dot pattern.  Otherwise the directory portion of
        `self.filename` is searched.

        Files combined successfully are deleted from disk; files that cannot
        be read cause a warning and are kept.

        With `strict` true, finding no files to combine is an error.

        """
        # The constructor ran os.path.abspath, so data_dir is never empty.
        data_dir, local = os.path.split(self.filename)
        dot_pattern = local + '.*'

        files_to_combine = []
        for path in (data_paths or [data_dir]):
            if os.path.isfile(path):
                files_to_combine.append(os.path.abspath(path))
            elif os.path.isdir(path):
                files_to_combine.extend(
                    glob.glob(os.path.join(os.path.abspath(path), dot_pattern))
                )
            else:
                raise CoverageException("Couldn't combine from non-existent path '%s'" % (path,))

        if strict and not files_to_combine:
            raise CoverageException("No data to combine")

        for data_file in files_to_combine:
            new_data = CoverageData(debug=self.debug)
            try:
                new_data.read_file(data_file)
            except CoverageException as exc:
                if self.warn:
                    # The exception text names the file, so pass it along
                    # verbatim as the warning.
                    self.warn(str(exc))
            else:
                data.update(new_data, aliases=aliases)
                if self.debug and self.debug.should('dataio'):
                    self.debug.write("Deleting combined data file %r" % (data_file,))
                file_be_gone(data_file)
Exemplo n.º 10
0
    def xml_report(
        self,
        morfs=None,
        outfile=None,
        ignore_errors=None,
        omit=None,
        include=None,
    ):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See :meth:`report` for other arguments.

        Returns a float, the total percentage covered.

        """
        self.get_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            xml_output=outfile,
        )
        file_to_close = None
        delete_file = False
        xml_output = self.config.xml_output
        if xml_output == '-':
            outfile = sys.stdout
        elif xml_output:
            # Ensure that the output directory is created; done here
            # because this report pre-opens the output file.
            # HTMLReport does this using the Report plumbing because
            # its task is more complex, being multiple files.
            output_dir = os.path.dirname(xml_output)
            if output_dir and not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            open_kwargs = {'encoding': 'utf8'} if env.PY3 else {}
            outfile = open(xml_output, "w", **open_kwargs)
            file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)
Exemplo n.º 11
0
    def xml_report(
        self, morfs=None, outfile=None, ignore_errors=None,
        omit=None, include=None,
    ):
        """Write a Cobertura-compatible XML coverage report.

        Every module in `morfs` appears in the report.  `outfile` names the
        file to write; "-" sends the report to stdout.

        See :meth:`report` for the remaining arguments.

        Returns a float, the total percentage covered.

        """
        self.get_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
            )
        file_to_close = None
        delete_file = False
        if self.config.xml_output == '-':
            outfile = sys.stdout
        elif self.config.xml_output:
            # This report pre-opens its output file, so the directory has to
            # exist first.  (HTMLReport handles this via the Report plumbing
            # because it writes many files.)
            output_dir = os.path.dirname(self.config.xml_output)
            if output_dir and not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            open_kwargs = {}
            if env.PY3:
                open_kwargs['encoding'] = 'utf8'
            outfile = open(self.config.xml_output, "w", **open_kwargs)
            file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            # Don't leave a half-written report behind.
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)
Exemplo n.º 12
0
    def erase(self, parallel=False):
        """Erase the data from the file storage.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        file_be_gone(self.filename)
        if parallel:
            # Parallel-mode files are named <basename>.<suffix>.
            data_dir, local = os.path.split(self.filename)
            glob_pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
            for data_file in glob.glob(glob_pattern):
                file_be_gone(data_file)
Exemplo n.º 13
0
    def erase(self, parallel=False):
        """Delete this object's data file, plus its parallel-mode siblings
        (files named from the basename) when `parallel` is true.

        """
        file_be_gone(self.filename)
        if not parallel:
            return
        directory, base = os.path.split(self.filename)
        pattern = os.path.join(os.path.abspath(directory), base + '.*')
        for stray in glob.glob(pattern):
            file_be_gone(stray)
Exemplo n.º 14
0
    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
                   omit=None, include=None):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See `coverage.report()` for other arguments.

        Returns a float, the total percentage covered.

        """
        self._harvest_data()
        self.config.from_args(
            ignore_errors=ignore_errors,
            omit=omit,
            include=include,
            xml_output=outfile,
        )
        file_to_close = None
        delete_file = False
        xml_output = self.config.xml_output
        if xml_output:
            if xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(xml_output, "w")
                file_to_close = outfile
        try:
            # Flattened try/except/finally replaces the original nested try
            # blocks; the behavior is the same.
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
                if delete_file:
                    file_be_gone(self.config.xml_output)
Exemplo n.º 15
0
    def erase(self, parallel=False):
        """Erase the data from the file storage.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        debug = self.debug
        if debug and debug.should('dataio'):
            debug.write("Erasing data file %r" % (self.filename,))
        file_be_gone(self.filename)
        if parallel:
            # Parallel-mode files share the basename plus a dotted suffix.
            data_dir, local = os.path.split(self.filename)
            pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
            for filename in glob.glob(pattern):
                if debug and debug.should('dataio'):
                    debug.write("Erasing parallel data file %r" % (filename,))
                file_be_gone(filename)
Exemplo n.º 16
0
    def erase(self, parallel=False):
        """Delete the stored data file, and its parallel-mode siblings when
        `parallel` is true.

        """
        def _trace(msg):
            # Emit a 'dataio' debug line when debugging is enabled.
            if self.debug and self.debug.should('dataio'):
                self.debug.write(msg)

        _trace("Erasing data file %r" % (self.filename,))
        file_be_gone(self.filename)
        if not parallel:
            return
        data_dir, local = os.path.split(self.filename)
        pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
        for filename in glob.glob(pattern):
            _trace("Erasing parallel data file %r" % (filename,))
            file_be_gone(filename)
Exemplo n.º 17
0
    def erase(self, parallel=False):
        """Erase the data in this object.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        self._reset()
        if self._debug.should('dataio'):
            self._debug.write("Erasing data file {!r}".format(self._filename))
        file_be_gone(self._filename)
        if not parallel:
            return
        # Remove every parallel-mode file named <basename>.<suffix>.
        data_dir, local = os.path.split(self._filename)
        pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
        for sibling in glob.glob(pattern):
            if self._debug.should('dataio'):
                self._debug.write("Erasing parallel data file {!r}".format(sibling))
            file_be_gone(sibling)
Exemplo n.º 18
0
    def erase(self, parallel=False):
        """Reset the in-memory data and remove the data file on disk.

        With `parallel` true, files created by parallel-mode (named from the
        basename plus a dotted suffix) are removed as well.

        """
        self._reset()
        if self._debug.should('dataio'):
            self._debug.write("Erasing data file {!r}".format(self._filename))
        file_be_gone(self._filename)
        if parallel:
            data_dir, local = os.path.split(self._filename)
            glob_pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
            for data_file in glob.glob(glob_pattern):
                if self._debug.should('dataio'):
                    self._debug.write(
                        "Erasing parallel data file {!r}".format(data_file))
                file_be_gone(data_file)
Exemplo n.º 19
0
    def combine_parallel_data(self, data, aliases=None, data_paths=None):
        """Merge a set of data files into `data`.

        `self.filename` is treated as a file prefix; every data file whose
        name is that prefix plus a dot is read and combined.

        `aliases`, when given, is a `PathAliases` object used to re-map
        recorded paths to the local machine's.

        `data_paths`, when given, lists files and/or directories to combine;
        directories are searched for files matching the prefix-plus-dot
        pattern.  Otherwise the directory portion of `self.filename` is
        searched.

        Every file found and combined is then deleted from disk.

        """
        # data_dir is never empty: the constructor applied os.path.abspath.
        data_dir, local = os.path.split(self.filename)
        match_name = local + '.*'

        files_to_combine = []
        for path in (data_paths or [data_dir]):
            if os.path.isfile(path):
                files_to_combine.append(os.path.abspath(path))
            elif os.path.isdir(path):
                files_to_combine.extend(
                    glob.glob(os.path.join(os.path.abspath(path), match_name)))
            else:
                raise CoverageException(
                    "Couldn't combine from non-existent path '%s'" % (path, ))

        for data_file in files_to_combine:
            new_data = CoverageData()
            new_data.read_file(data_file)
            data.update(new_data, aliases=aliases)
            file_be_gone(data_file)
Exemplo n.º 20
0
 def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
                omit=None, include=None):
     """Write an XML (Cobertura) coverage report; returns total coverage."""
     self._harvest_data()
     self.config.from_args(ignore_errors=ignore_errors, omit=omit,
                           include=include, xml_output=outfile)
     file_to_close = None
     delete_file = False
     if self.config.xml_output:
         if self.config.xml_output != '-':
             outfile = open(self.config.xml_output, 'w')
             file_to_close = outfile
         else:
             outfile = sys.stdout
     try:
         reporter = XmlReporter(self, self.config)
         return reporter.report(morfs, outfile=outfile)
     except CoverageException:
         # Remember to remove the partial report file.
         delete_file = True
         raise
     finally:
         if file_to_close:
             file_to_close.close()
             if delete_file:
                 file_be_gone(self.config.xml_output)
Exemplo n.º 21
0
    def erase(self, parallel=False):
        """Erase the data in this object.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        # Reset every piece of in-memory state, then re-check invariants.
        self._lines = None
        self._arcs = None
        self._file_tracers = {}
        self._runs = []
        self._validate()

        if self._debug.should('dataio'):
            self._debug.write("Erasing data file %r" % (self.filename,))
        file_be_gone(self.filename)
        if parallel:
            data_dir, local = os.path.split(self.filename)
            full_pattern = os.path.join(os.path.abspath(data_dir), local + '.*')
            for data_file in glob.glob(full_pattern):
                if self._debug.should('dataio'):
                    self._debug.write(
                        "Erasing parallel data file %r" % (data_file,))
                file_be_gone(data_file)
Exemplo n.º 22
0
 def test_remove_actual_file(self):
     """file_be_gone really removes a file that exists."""
     filename = "here.txt"
     self.make_file(filename, "We are here, we are here, we are here!")
     file_be_gone(filename)
     self.assert_doesnt_exist(filename)
Exemplo n.º 23
0
 def erase(self):
     """Clear the collected data, removing the data file if one is in use."""
     if self.use_file and self.filename:
         file_be_gone(self.filename)
     # Reset the in-memory line and arc maps.
     self.lines = {}
     self.arcs = {}
Exemplo n.º 24
0
def combine_parallel_data(
    data, aliases=None, data_paths=None, strict=False, keep=False, message=None,
):
    """Merge parallel data files into `data`.

    `data` is a CoverageData.

    `data.filename` acts as a file prefix: each data file named with that
    prefix plus a dot is combined into `data`.

    `aliases`, when given, is a `PathAliases` object used to re-map recorded
    paths onto the local machine's layout.

    `data_paths`, when given, lists files and/or directories to combine;
    directories are searched for files matching the prefix-plus-dot pattern.
    Otherwise the directory portion of `data.filename` is searched.

    Combined files are deleted from disk unless `keep` is true.  A file that
    cannot be read produces a warning and is left in place.

    With `strict` true, it is an error to find no files to combine, or for
    none of the found files to be usable.

    """
    files_to_combine = combinable_files(data.base_filename(), data_paths)

    if strict and not files_to_combine:
        raise NoDataError("No data to combine")

    files_combined = 0
    for f in files_to_combine:
        if f == data.data_filename():
            # Combining into a file that is itself one of the parallel
            # inputs: skip it rather than fold it into itself.
            if data._debug.should('dataio'):
                data._debug.write(f"Skipping combining ourself: {f!r}")
            continue
        if data._debug.should('dataio'):
            data._debug.write(f"Combining data file {f!r}")
        try:
            new_data = CoverageData(f, debug=data._debug)
            new_data.read()
        except CoverageException as exc:
            if data._warn:
                # The exception text already names the file, so use it
                # directly as the warning.
                data._warn(str(exc))
            continue
        data.update(new_data, aliases=aliases)
        files_combined += 1
        if message:
            message(f"Combined data file {os.path.relpath(f)}")
        if not keep:
            if data._debug.should('dataio'):
                data._debug.write(f"Deleting combined data file {f!r}")
            file_be_gone(f)

    if strict and not files_combined:
        raise NoDataError("No usable data files")
Exemplo n.º 25
0
    def html_file(self, fr, analysis):
        """Generate an HTML file for one source file.

        `fr` is the file reporter for the source file (it supplies the file
        name, source text, and tokenized lines); `analysis` holds its
        coverage results.  Writes the per-file HTML page — unless the file
        is skipped (100% covered with skip_covered) or unchanged since the
        last run — and records its info for the index page.
        """
        rootname = flat_rootname(fr.relative_filename())
        html_filename = rootname + ".html"
        html_path = os.path.join(self.directory, html_filename)

        # Get the numbers for this file.
        nums = analysis.numbers
        self.all_files_nums.append(nums)

        if self.config.skip_covered:
            # Don't report on 100% files.
            no_missing_lines = (nums.n_missing == 0)
            no_missing_branches = (nums.n_partial_branches == 0)
            if no_missing_lines and no_missing_branches:
                # If there's an existing file, remove it.
                file_be_gone(html_path)
                return

        source = fr.source()

        # Find out if the file on disk is already correct.
        this_hash = self.file_hash(source.encode('utf-8'), fr)
        that_hash = self.status.file_hash(rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(rootname))
            return

        self.status.set_file_hash(rootname, this_hash)

        if self.has_arcs:
            # Branch coverage: collect the arc data used below to annotate
            # partially-executed lines.
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed()

        # These classes determine which lines are highlighted by default.
        c_run = "run hide_run"
        c_exc = "exc"
        c_mis = "mis"
        c_par = "par " + c_run

        lines = []

        for lineno, line in enumerate(fr.source_token_lines(), start=1):
            # Figure out how to mark this line.
            line_class = []
            annotate_html = ""
            annotate_long = ""
            if lineno in analysis.statements:
                line_class.append("stm")
            if lineno in analysis.excluded:
                line_class.append(c_exc)
            elif lineno in analysis.missing:
                line_class.append(c_mis)
            elif self.has_arcs and lineno in missing_branch_arcs:
                # Partial branch: build a short inline annotation and a long
                # hover text describing each missed branch destination.
                line_class.append(c_par)
                shorts = []
                longs = []
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        # Negative destinations mean "exit the function".
                        shorts.append("exit")
                    else:
                        shorts.append(b)
                    longs.append(
                        fr.missing_arc_description(lineno, b, arcs_executed))
                # 202F is NARROW NO-BREAK SPACE.
                # 219B is RIGHTWARDS ARROW WITH STROKE.
                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d)
                                                      for d in shorts)

                if len(longs) == 1:
                    annotate_long = longs[0]
                else:
                    # Number each missed branch so the list reads cleanly.
                    annotate_long = "%d missed branches: %s" % (
                        len(longs),
                        ", ".join(
                            "%d) %s" % (num, ann_long)
                            for num, ann_long in enumerate(longs, start=1)),
                    )
            elif lineno in analysis.statements:
                line_class.append(c_run)

            # Build the HTML for the line.
            html = []
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    # &nbsp; keeps an empty token from collapsing in the page.
                    tok_html = escape(tok_text) or '&nbsp;'
                    html.append('<span class="%s">%s</span>' %
                                (tok_type, tok_html))

            lines.append({
                'html': ''.join(html),
                'number': lineno,
                'class': ' '.join(line_class) or "pln",
                'annotate': annotate_html,
                'annotate_long': annotate_long,
            })

        # Write the HTML page for this file.
        html = self.source_tmpl.render({
            'c_exc': c_exc,
            'c_mis': c_mis,
            'c_par': c_par,
            'c_run': c_run,
            'has_arcs': self.has_arcs,
            'extra_css': self.extra_css,
            'fr': fr,
            'nums': nums,
            'lines': lines,
            'time_stamp': self.time_stamp,
        })

        write_html(html_path, html)

        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'relative_filename': fr.relative_filename(),
        }
        self.files.append(index_info)
        self.status.set_index_info(rootname, index_info)
Exemplo n.º 26
0
 def test_actual_errors(self):
     """file_be_gone re-raises OS errors it cannot handle."""
     # Errors can still happen.
     # ". is a directory" on Unix, or "Access denied" on Windows
     with self.assertRaises(OSError):
         file_be_gone(".")
Exemplo n.º 27
0
 def test_remove_actual_file(self):
     """Removing an existing file deletes it from disk."""
     # It really does remove a file that does exist.
     self.make_file("here.txt", "We are here, we are here, we are here!")
     file_be_gone("here.txt")
     self.assert_doesnt_exist("here.txt")
Exemplo n.º 28
0
 def test_remove_nonexistent_file(self):
     """Removing a missing file is a silent no-op."""
     # It's OK to try to remove a file that doesn't exist.
     file_be_gone("not_here.txt")
Exemplo n.º 29
0
 def tearDown(self):
     """Remove every file created during the test."""
     # Same deletions as before, in the same order, via one loop.
     for filename in (self.output_file, self.cov_data_1, self.cov_data_2):
         file_be_gone(filename)
Exemplo n.º 30
0
def combine_parallel_data(data, aliases=None, data_paths=None, strict=False, keep=False):
    """Combine a number of data files together.

    Treat `data.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    If `data_paths` is provided, it is a list of directories or files to
    combine.  Directories are searched for files that start with
    `data.filename` plus dot as a prefix, and those files are combined.

    If `data_paths` is not provided, then the directory portion of
    `data.filename` is used as the directory to search for data files.

    Unless `keep` is True every data file found and combined is then deleted from disk. If a file
    cannot be read, a warning will be issued, and the file will not be
    deleted.

    If `strict` is true, and no files are found to combine, an error is
    raised.

    """
    # Because of the os.path.abspath in the constructor, data_dir will
    # never be an empty string.
    data_dir, local = os.path.split(data.base_filename())
    localdot = local + '.*'

    data_paths = data_paths or [data_dir]
    files_to_combine = []
    for p in data_paths:
        if os.path.isfile(p):
            files_to_combine.append(os.path.abspath(p))
        elif os.path.isdir(p):
            pattern = os.path.join(os.path.abspath(p), localdot)
            files_to_combine.extend(glob.glob(pattern))
        else:
            raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))

    if strict and not files_to_combine:
        raise CoverageException("No data to combine")

    files_combined = 0
    for f in files_to_combine:
        if f == data.data_filename():
            # Sometimes we are combining into a file which is one of the
            # parallel files.  Skip that file.
            if data._debug.should('dataio'):
                data._debug.write("Skipping combining ourself: %r" % (f,))
            continue
        if data._debug.should('dataio'):
            data._debug.write("Combining data file %r" % (f,))
        try:
            new_data = CoverageData(f, debug=data._debug)
            new_data.read()
        except CoverageException as exc:
            if data._warn:
                # The CoverageException has the file name in it, so just
                # use the message as the warning.
                data._warn(str(exc))
        else:
            data.update(new_data, aliases=aliases)
            files_combined += 1
            if not keep:
                if data._debug.should('dataio'):
                    data._debug.write("Deleting combined data file %r" % (f,))
                file_be_gone(f)

    if strict and not files_combined:
        raise CoverageException("No usable data files")
Exemplo n.º 31
0
 def erase(self):
     """Forget all collected coverage data.

     Deletes the backing data file when file storage is in use, then
     resets the in-memory line and arc stores to empty dicts.
     """
     if self.use_file and self.filename:
         file_be_gone(self.filename)
     self.lines = {}
     self.arcs = {}
Exemplo n.º 32
0
 def test_remove_nonexistent_file(self):
     """Asking to remove an absent file must not raise."""
     path = "not_here.txt"
     file_be_gone(path)
Exemplo n.º 33
0
    def html_file(self, fr, analysis):
        """Generate an HTML file for one source file.

        `fr` is the file reporter for the source file; `analysis` holds its
        coverage results.  May return without writing anything: when the
        file is 100% covered under `skip_covered`, or when its content hash
        matches the previously recorded one.  Otherwise renders the page,
        writes it to disk, and records the file's summary for the index.
        """
        rootname = flat_rootname(fr.relative_filename())
        html_filename = rootname + ".html"
        html_path = os.path.join(self.directory, html_filename)

        # Get the numbers for this file.
        nums = analysis.numbers
        self.all_files_nums.append(nums)

        if self.config.skip_covered:
            # Don't report on 100% files.
            no_missing_lines = (nums.n_missing == 0)
            no_missing_branches = (nums.n_partial_branches == 0)
            if no_missing_lines and no_missing_branches:
                # If there's an existing file, remove it.
                file_be_gone(html_path)
                return

        source = fr.source()

        # Find out if the file on disk is already correct.
        this_hash = self.file_hash(source.encode('utf-8'), fr)
        that_hash = self.status.file_hash(rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(rootname))
            return

        self.status.set_file_hash(rootname, this_hash)

        # Branch information is only available when arcs were measured.
        if self.has_arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed()

        # These classes determine which lines are highlighted by default.
        # A partially-branched line also carries the "run" classes.
        c_run = "run hide_run"
        c_exc = "exc"
        c_mis = "mis"
        c_par = "par " + c_run

        lines = []

        for lineno, line in enumerate(fr.source_token_lines(), start=1):
            # Figure out how to mark this line.
            # Priority of the exclusive classes: excluded > missing >
            # partial-branch > run.
            line_class = []
            annotate_html = ""
            annotate_long = ""
            if lineno in analysis.statements:
                line_class.append("stm")
            if lineno in analysis.excluded:
                line_class.append(c_exc)
            elif lineno in analysis.missing:
                line_class.append(c_mis)
            elif self.has_arcs and lineno in missing_branch_arcs:
                line_class.append(c_par)
                shorts = []
                longs = []
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        # Negative destinations are rendered as "exit".
                        shorts.append("exit")
                    else:
                        shorts.append(b)
                    longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
                # 202F is NARROW NO-BREAK SPACE.
                # 219B is RIGHTWARDS ARROW WITH STROKE.
                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)

                if len(longs) == 1:
                    annotate_long = longs[0]
                else:
                    # Number the branch descriptions when there are several.
                    annotate_long = "%d missed branches: %s" % (
                        len(longs),
                        ", ".join("%d) %s" % (num, ann_long)
                            for num, ann_long in enumerate(longs, start=1)),
                    )
            elif lineno in analysis.statements:
                # Executed statement: not excluded, missing, or partial.
                line_class.append(c_run)

            # Build the HTML for the line.
            html = []
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    # An empty token still needs visible width in the page.
                    tok_html = escape(tok_text) or '&nbsp;'
                    html.append(
                        '<span class="%s">%s</span>' % (tok_type, tok_html)
                    )

            lines.append({
                'html': ''.join(html),
                'number': lineno,
                'class': ' '.join(line_class) or "pln",
                'annotate': annotate_html,
                'annotate_long': annotate_long,
            })

        # Write the HTML page for this file.
        html = self.source_tmpl.render({
            'c_exc': c_exc,
            'c_mis': c_mis,
            'c_par': c_par,
            'c_run': c_run,
            'has_arcs': self.has_arcs,
            'extra_css': self.extra_css,
            'fr': fr,
            'nums': nums,
            'lines': lines,
            'time_stamp': self.time_stamp,
        })

        write_html(html_path, html)

        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'relative_filename': fr.relative_filename(),
        }
        self.files.append(index_info)
        self.status.set_index_info(rootname, index_info)
Exemplo n.º 34
0
 def erase(self):
     """Discard the stored data by deleting the backing file."""
     target = self.filename
     file_be_gone(target)
Exemplo n.º 35
0
 def test_actual_errors(self):
     """Errors from the underlying removal still propagate."""
     # "." is a directory: "Is a directory" on Unix, "Access denied"
     # on Windows.
     self.assertRaises(OSError, file_be_gone, ".")
Exemplo n.º 36
0
    def html_file(self, fr, analysis):
        """Generate an HTML file for one source file.

        `fr` is the file reporter for the source file; `analysis` holds its
        coverage results.  May return without writing: fully-covered file
        under `skip_empty`/`skip_covered` (counted in the skip counters), or
        a file the incremental checker says is unchanged.  Otherwise builds
        per-line HTML and annotations, renders the template, writes the page,
        and records the file's summary for the index.
        """
        rootname = flat_rootname(fr.relative_filename())
        html_filename = rootname + ".html"
        ensure_dir(self.directory)
        html_path = os.path.join(self.directory, html_filename)

        # Get the numbers for this file.
        nums = analysis.numbers
        self.all_files_nums.append(nums)

        if self.skip_covered:
            # Don't report on 100% files.
            no_missing_lines = (nums.n_missing == 0)
            no_missing_branches = (nums.n_partial_branches == 0)
            if no_missing_lines and no_missing_branches:
                # If there's an existing file, remove it.
                file_be_gone(html_path)
                self.skipped_covered_count += 1
                return

        if self.skip_empty:
            # Don't report on empty files.
            if nums.n_statements == 0:
                file_be_gone(html_path)
                self.skipped_empty_count += 1
                return

        # Find out if the file on disk is already correct.
        if self.incr.can_skip_file(self.data, fr, rootname):
            self.file_summaries.append(self.incr.index_info(rootname))
            return

        # Write the HTML page for this file.
        # The line data objects are mutated in place: html, annotate,
        # annotate_long, and css_class attributes are filled in here.
        file_data = self.datagen.data_for_file(fr, analysis)
        for ldata in file_data.lines:
            # Build the HTML for the line.
            html = []
            for tok_type, tok_text in ldata.tokens:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    # An empty token still needs visible width in the page.
                    tok_html = escape(tok_text) or '&nbsp;'
                    html.append(f'<span class="{tok_type}">{tok_html}</span>')
            ldata.html = ''.join(html)

            if ldata.short_annotations:
                # 202F is NARROW NO-BREAK SPACE.
                # 219B is RIGHTWARDS ARROW WITH STROKE.
                ldata.annotate = ",&nbsp;&nbsp; ".join(
                    f"{ldata.number}&#x202F;&#x219B;&#x202F;{d}"
                    for d in ldata.short_annotations)
            else:
                ldata.annotate = None

            if ldata.long_annotations:
                longs = ldata.long_annotations
                if len(longs) == 1:
                    ldata.annotate_long = longs[0]
                else:
                    # Number the branch descriptions when there are several.
                    ldata.annotate_long = "{:d} missed branches: {}".format(
                        len(longs),
                        ", ".join(
                            f"{num:d}) {ann_long}"
                            for num, ann_long in enumerate(longs, start=1)),
                    )
            else:
                ldata.annotate_long = None

            # Map the line's category to a CSS class; "pln" is the default
            # for uncategorized (plain) lines.
            css_classes = []
            if ldata.category:
                css_classes.append(
                    self.template_globals['category'][ldata.category])
            ldata.css_class = ' '.join(css_classes) or "pln"

        html = self.source_tmpl.render(file_data.__dict__)
        write_html(html_path, html)

        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'relative_filename': fr.relative_filename(),
        }
        self.file_summaries.append(index_info)
        self.incr.set_index_info(rootname, index_info)
Exemplo n.º 37
0
 def tearDown(self):
     """Delete the data and output files used by the test."""
     for fname in (self.output_file, self.cov_data_1, self.cov_data_2):
         file_be_gone(fname)