Example No. 1
    def gen_xcov_reports(self):
        """Generate the reports against which we will check expectation
        specs. Request the report format, saved as test.rep, and the xcov
        format (.ad?.xcov outputs) if we're not in qualification mode."""

        # Determine what options we are going to provide as the
        # assessment's inputs.

        # For a single driver, we always rely on a trace as input and we
        # produce a checkpoint for possible future consolidation if the
        # current execution mode calls for it:

        checkpoints = thistest.options.consolidate == 'checkpoints'

        single_driver = no_ext(self.drivers[0]) if self.singletest() else None

        use_checkpoint_inputs = checkpoints and not single_driver

        # We request traces as input with "@inputs.list", where the file
        # contains the list of traces to use, derived from the set of drivers.
        # We request checkpoints as inputs with "[email protected]",
        # where the file contains the list of checkpoints to use, derived
        # from the set of drivers as well:

        (input_opt, input_fn) = \
            ("--checkpoint=", ckptname_for) if use_checkpoint_inputs \
            else ("", tracename_for)

        inputs = "%s@" % input_opt + list_to_file([
            self.awdir_for(no_ext(main)) + input_fn(no_ext(main))
            for main in self.drivers
        ], "inputs.list")

        # We don't need and don't want to pass SCO options when using
        # checkpoints as inputs:

        # Work on a copy of the SCO options: we extend this list with
        # report-specific switches below, and self.scoptions is reused for
        # the xcov format afterwards.
        report_options = (
            list(self.scoptions) if not use_checkpoint_inputs else [])

        report_options.extend(['-o', 'test.rep'])

        if single_driver and checkpoints:
            report_options.append("--save-checkpoint=%s" %
                                  ckptname_for(single_driver))

        self.gen_one_xcov_report(inputs,
                                 format="report",
                                 options=report_options)

        # Now produce an alternate .xcov output format, unless we are
        # performing a qualification run, for which that format isn't
        # appropriate.

        if thistest.options.qualif_level:
            return

        xcov_options = self.scoptions if not use_checkpoint_inputs else []

        self.gen_one_xcov_report(inputs, format="xcov", options=xcov_options)
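The inputs construction above is easier to follow with a concrete picture of the helpers it relies on. The sketch below is illustrative only: it assumes list_to_file writes one entry per line and returns the name of the file it created, and that tracename_for/ckptname_for map a driver base name to its trace or checkpoint file name; the real testsuite implementations may differ.

def list_to_file(lines, filename):
    # Assumed behavior: dump one entry per line into FILENAME and return
    # the file name, so callers can build "@inputs.list" style arguments.
    with open(filename, "w") as f:
        f.write("\n".join(lines) + "\n")
    return filename


# With trace inputs, the computed `inputs` argument then reads
# "@inputs.list"; when consolidating from checkpoints it reads
# "--checkpoint=@inputs.list".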
Example No. 2
    def main(self):

        # For a single test, discriminate with driver basename. For a
        # consolidation test, discriminate with the expectation file basename.
        # We need the latter to allow multiple consolidation scenarios for a
        # testcase.

        return (no_ext(self.drivers[0])
                if self.singletest() else os.path.basename(no_ext(self.xfile)))
Example No. 3
    def log(self):
        frame("%s/ %s, %s\n%s coverage with %s" %
              (os.path.relpath(os.getcwd(), thistest.homedir),
               str([no_ext(main) for main in self.drivers]), self.xfile,
               self.testcase.category.name if self.testcase.category else
               "generic", ' '.join(self.covoptions)),
              char='*').display()
Example No. 4
    def units_of_interest(self):
        """Set of units for which we have expectations to match, based
        on the list of sources for which we have expectations and assuming
        standard use of '-' in filenames for child units or subunits
        (foo-bar.ads for package Foo.Bar).
        """
        return {
            no_ext(os.path.basename(soi)).replace('-', '.')
            for soi in self.sources_of_interest()
        }
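A quick worked example of the transformation above, using os.path.splitext as a stand-in for no_ext (an assumption: no_ext is taken to simply strip the file extension):

import os.path

# Source file for child package Foo.Bar, per the convention in the docstring.
source = "src/foo-bar.ads"
unit = os.path.splitext(os.path.basename(source))[0].replace('-', '.')
# -> "foo.bar", the (lower-cased) unit name the expectations refer to.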
Example No. 5
    def locate_ali(self, source):
        """Return the fullpath of the ali file corresponding to the given
        SOURCE file.  Return None if none was found.
        """

        # Whatever the kind of test we are (single or consolidation), we
        # expect every ALI file of interest to be associated with at least
        # one single test, and to be present in the "obj" subdirectory of
        # the associated binary dir.

        # Compute the local path from a single test bindir and iterate over
        # the binary dirs of all our drivers until we find it. There might
        # actually be several instances in the consolidation case. We assume
        # they are all identical, and they should be for typical situations
        # where the same sources were exercised by multiple drivers:

        lang_info = language_info(source)
        lali = "obj/" + lang_info.scofile_for(os.path.basename(source))
        for main in self.drivers:
            tloc = self.abdir_for(no_ext(main)) + lali
            if os.path.exists(tloc):
                return tloc

        return None
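locate_ali delegates the SCO file naming scheme to language_info, which is not shown in this excerpt. A plausible sketch, assuming it simply resolves the LANGINFO entry (see Example No. 6) whose registered source extensions match the given source:

import os.path


def language_info(source):
    # Assumed lookup: return the LangInfo whose src_ext list contains the
    # extension of SOURCE, or None when no known language matches.
    # LANGINFO is the table shown in Example No. 6.
    ext = os.path.splitext(source)[1]
    for info in LANGINFO.values():
        if ext in info.src_ext:
            return info
    return None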
Example No. 6
           "x.ali" for "x.adb" in Ada or "t.c.gli" for "t.c" in C.
    """
    def __init__(self, name, src_ext, comment, scofile_for):
        self.name = name
        self.src_ext = src_ext
        self.comment = comment
        self.scofile_for = scofile_for


# A dictionary mapping each known language name to its LangInfo instance
LANGINFO = {
    "Ada":
    LangInfo(name="Ada",
             src_ext=[".ads", ".adb"],
             comment='--',
             scofile_for=lambda source: (no_ext(source) + '.ali')),
    "C":
    LangInfo(name="C",
             src_ext=[".h", ".c"],
             comment='//',
             scofile_for=lambda source: (source + '.gli')),
    "C++":
    LangInfo(name="C++",
             src_ext=[".hpp", ".cpp", ".cc", ".hh", ".hxx"],
             comment='//',
             scofile_for=None),
    "Asm":
    LangInfo(name="Asm", src_ext=[".s"], comment='#', scofile_for=None),
    "Cons":
    LangInfo(name="Consolidation",
             src_ext=[".txt"],
Example No. 7
    def gen_xcov_reports(self):
        """Generate the reports against which we will check expectation
        specs. Request the report format, saved as test.rep, and the xcov
        format (.ad?.xcov outputs) if we're not in qualification mode."""

        # Determine what options we are going to provide as the
        # assessment's inputs.

        # For a single driver, we always rely on a trace as input and we
        # produce a checkpoint for possible future consolidation if the
        # current execution mode calls for it:

        checkpoints = thistest.options.consolidate == 'checkpoints'

        single_driver = no_ext(self.drivers[0]) if self.singletest() else None

        use_checkpoint_inputs = checkpoints and not single_driver

        # We request traces as input with "@inputs.list", where the file
        # contains the list of traces to use, derived from the set of drivers.
        # We request checkpoints as inputs with "[email protected]",
        # where the file contains the list of checkpoints to use, derived
        # from the set of drivers as well:

        (input_opt, input_fn) = \
            ("--checkpoint=", ckptname_for) if use_checkpoint_inputs \
            else ("", self.mode_tracename_for)

        inputs = "%s@%s" % (input_opt,
                            list_to_file([
                                self.awdir_for(pgm) + input_fn(pgm)
                                for pgm in self.programs()
                            ], "inputs.list"))

        # Determine what command line options we'll pass to designate units of
        # interest and maybe produce a coverage checkpoint. We don't need and
        # don't want to pass SCO options when using checkpoints as inputs.

        sco_options = ([] if use_checkpoint_inputs else
                       self.mode_coverage_sco_options())

        save_checkpoint_options = ([
            "--save-checkpoint=%s" % ckptname_for(single_driver)
        ] if single_driver and checkpoints else [])

        # Now produce the --annotate=report format:

        self.gen_one_xcov_report(inputs,
                                 format="report",
                                 options=sco_options +
                                 save_checkpoint_options + ['-o', 'test.rep'])

        # Then an alternate .xcov output format, unless we are performing a
        # qualification run, for which that format isn't appropriate. No need
        # to regenerate a coverage checkpoint there - it would convey the same
        # as what the --annotate=report already produced if a checkpoint is
        # needed.

        if thistest.options.qualif_level:
            return

        self.gen_one_xcov_report(inputs, format="xcov", options=sco_options)
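To make the assembled option lists concrete, here is a small illustrative composition for the single-driver, trace-input case (file names as used above; the mapping of format="report" to --annotate=report follows the comment in the code, everything else about gen_one_xcov_report is assumed):

# Illustration only: the option list handed to gen_one_xcov_report for the
# report pass, assuming trace inputs and the default --scos SCO options.
sco_options = ["--scos=@alis.list"]
save_checkpoint_options = []                # no checkpoint requested here
report_options = sco_options + save_checkpoint_options + ['-o', 'test.rep']

# gen_one_xcov_report(inputs="@inputs.list", format="report",
#                     options=report_options)
# presumably ends up running something like:
#   gnatcov coverage --annotate=report --scos=@alis.list -o test.rep @inputs.list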
Example No. 8
    def run(self):
        """Evaluate source coverage as exercised by self.drivers"""

        self.log()

        # Whatever the kind of test, we get to a Working Directory and
        # switch back when done:
        self.to_workdir(self.rwdir())

        # If we are requested to convey units of interest through a project
        # file and don't have a coverage control object to obey, build one to
        # convey the units of interest:

        if thistest.options.gprmode and not self.covctl:
            self.covctl = CovControl(units_in=self.units_of_interest())

        # Assess whether we should be using a project file to convey units of
        # interest, either requested from the command line or for specific
        # test purposes:

        self.gprmode = (thistest.options.gprmode
                        or (self.covctl and self.covctl.requires_gpr()))

        # Compute our GPR now, which we will need for build of single tests
        # and/or analysis later on if in gprmode.  Turn inlining off for the
        # driver unit, so we exercise the functional code as separately
        # compiled, not as an inlined version of it in a non-representative
        # driver context.

        # Most of the tests with coverage control operate within
        # an extra subdir level
        this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

        self.gpr_obj_dir = 'obj'
        self.gpr = gprfor(
            mains=self.drivers,
            prjid="gen",
            objdir=self.gpr_obj_dir,
            srcdirs=["../" * n + "src" for n in range(1, this_depth)],
            exedir=self.abdir(),
            main_cargs="-fno-inline",
            langs=["Ada", "C"],
            deps=self.covctl.deps if self.covctl else [],
            extra=self.covctl.gpr() if self.covctl else "")

        # For single tests (no consolidation), we first need to build, then
        # to execute to get an execution trace.  All these we already have for
        # consolidation tests, and there's actually no need to build if we
        # were provided a bin directory to reuse:

        if self.singletest() and not self.wdctl.reuse_bin:
            self.mode_build()

        # Do gnatcov run now unless we're consolidating.  We'll just reuse
        # traces from previous executions in the latter case.

        if self.singletest():
            self.run_test(main=no_ext(self.drivers[0]))

        # At this point, we have everything we need for the analysis. Either
        # from the just done build+run in the single test case, or from
        # previous such sequences in the consolidation case.  Run gnatcov
        # coverage to get actual coverage reports and check against our
        # Xpectation specs.

        self.gen_xcov_reports()
        self.check_expectations()

        self.to_homedir()
        thistest.flush()

        # Let callers retrieve execution data at will
        return self
Example No. 9
    def programs(self):
        """List of base binary file names for the test drivers we are
        given to exercise.
        """
        return [no_ext(main) for main in self.drivers]
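Nearly every excerpt here leans on no_ext. Its definition is not part of this excerpt, but its uses (e.g. scofile_for in Example No. 6 turning "x.adb" into "x.ali") suggest a minimal sketch along these lines:

import os.path


def no_ext(filename):
    # Assumed behavior: drop the extension, keeping any directory part,
    # e.g. "test_foo.adb" -> "test_foo".
    return os.path.splitext(filename)[0]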
Example No. 10
    def run(self):
        """Evaluate source coverage as exercised by self.drivers"""

        self.log()

        # Whatever the kind of test, we get to a Working Directory and
        # switch back when done:
        self.to_workdir(self.rwdir())

        # Compute our GPR now, which we will need for build of single tests
        # and/or analysis later on if in gprmode.  Turn inlining off for the
        # driver unit, so we exercise the functional code as separately
        # compiled, not as an inlined version of it in a non-representative
        # driver context.

        # Most of the tests with coverage control operate within
        # an extra subdir level
        this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

        self.gpr = gprfor(
            mains=self.drivers,
            prjid="gen",
            srcdirs=["../" * n + "src" for n in range(1, this_depth)],
            exedir=self.abdir(),
            main_cargs="-fno-inline",
            langs=["Ada", "C"],
            deps=self.covctl.deps if self.covctl else (),
            extra=self.covctl.gpr() if self.covctl else "")

        # For single tests (no consolidation), we first need to build,
        # producing the binary to execute and the ALIs files, then to gnatcov
        # run to get an execution trace.  All these we already have for
        # consolidation tests, and there's actually no need to build if we
        # were provided a bin directory to reuse:

        if self.singletest() and not self.wdctl.reuse_bin:
            gprbuild(self.gpr, extracargs=self.extracargs)

        # Compute the gnatcov command line arguments we'll pass to convey
        # the set of SCOs to operate upon.  Note that we need these for
        # both gnatcov run and gnatcov coverage.

        thistest.gprmode = (thistest.options.gprmode
                            or (self.covctl and self.covctl.requires_gpr()))

        self.scoptions = (
            to_list(self.covctl.scoptions) if
            (self.covctl and self.covctl.scoptions) else
            ["-P%s" % self.gpr] if thistest.gprmode else
            ["--scos=@%s" % list_to_file(self.ali_list(), "alis.list")])

        # Remember which of these indicate the use of project files, which
        # might influence default output dirs for example.

        self.gproptions = [
            opt for opt in self.scoptions if opt.startswith("-P")
        ]

        # Do gnatcov run now unless we're consolidating.  We'll just reuse
        # traces from previous executions in the latter case.

        if self.singletest():
            self.xcov_run(no_ext(self.drivers[0]))

        # At this point, we have everything we need for the analysis. Either
        # from the just done build+run in the single test case, or from
        # previous such sequences in the consolidation case.  Run gnatcov
        # coverage to get actual coverage reports and check against our
        # Xpectation specs.

        self.gen_xcov_reports()
        self.check_expectations()

        self.to_homedir()
        thistest.flush()

        # Let callers retrieve execution data at will
        return self
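The scoptions/gproptions computation above reduces to one of three concrete shapes; below is a small illustrative check of the project-file case (the "gen.gpr" name is hypothetical, standing in for whatever gprfor returns):

# Illustration of the -P filter above for the project-file case.
scoptions = ["-Pgen.gpr"]        # hypothetical result of "-P%s" % self.gpr
gproptions = [opt for opt in scoptions if opt.startswith("-P")]
assert gproptions == ["-Pgen.gpr"]

# In the two other cases, scoptions is either the explicit
# self.covctl.scoptions list or ["--scos=@alis.list"], and gproptions
# comes out empty.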
Example No. 11
    """
    def __init__(self, name, src_ext, comment, scofile_for, sidfile_for=None):
        self.name = name
        self.src_ext = src_ext
        self.comment = comment
        self.scofile_for = scofile_for
        self.sidfile_for = sidfile_for


# A dictionary mapping each known language name to its LangInfo instance
LANGINFO = {
    "Ada":
    LangInfo(name="Ada",
             src_ext=[".ads", ".adb"],
             comment='--',
             scofile_for=lambda source: no_ext(source) + '.ali',
             sidfile_for=lambda source: no_ext(source) + '.sid'),
    "C":
    LangInfo(name="C",
             src_ext=[".h", ".c"],
             comment='//',
             scofile_for=lambda source: source + '.gli',
             sidfile_for=lambda source: source + '.sid'),
    "C++":
    LangInfo(name="C++",
             src_ext=[".hpp", ".cpp", ".cc", ".hh", ".hxx"],
             comment='//',
             scofile_for=None),
    "Asm":
    LangInfo(name="Asm", src_ext=[".s"], comment='#', scofile_for=None),
    "Cons":