Пример #1
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_file

wd = Wdir()


def run_check(subdir, units, xreports):
    """Exercise one TestCase in SUBDIR, conveying UNITS of interest
    through a unit-list file and expecting reports only for XREPORTS."""
    wd.to_subdir(subdir)
    TestCase(category=None).run(
        covcontrol=CovControl(ulist_in="../" + list_to_file(units),
                              xreports=xreports))


# Check on lone node unit only
run_check("wd_1", units=["ops"], xreports=["ops.ads", "ops.adb"])

# Check on lone node + child unit
run_check("wd_2", units=["ops", "ops.andthen"],
          xreports=["ops.ads", "ops.adb", "ops-andthen.adb"])

# Check on lone child unit only
run_check("wd_3", units=["ops.andthen"], xreports=["ops-andthen.adb"])

thistest.result()
Пример #2
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_file

base_out = ["support", "test_or_ft", "test_and_tt", "test_and_tf"]

wd = Wdir()


def run_check(subdir, extra_out, xreports):
    """Exercise one TestCase in SUBDIR, excluding BASE_OUT plus the
    EXTRA_OUT units through a unit-list file, and expecting reports
    only for XREPORTS."""
    wd.to_subdir(subdir)
    TestCase(category=None).run(
        covcontrol=CovControl(
            ulist_out="../" + list_to_file(base_out + extra_out),
            xreports=xreports))


# Check on lone node unit only
run_check("wd_1", extra_out=["ops"],
          xreports=["ops-andthen.adb", "ops-orelse.adb"])

# Check on child units only
run_check("wd_2", extra_out=["ops.orelse", "ops.andthen"],
          xreports=["ops.ads", "ops.adb"])

# Check on root + child unit
run_check("wd_3", extra_out=["ops", "ops.andthen"],
          xreports=["ops-orelse.adb"])

thistest.result()
Пример #3
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest

# Run the testcase twice: once with the default coverage controls,
# then once more requesting per-instance SCO separation ("-S instance"),
# in a distinct working subdir so the two runs don't collide.
testcase = TestCase()
testcase.run()
testcase.run(
    covcontrol=CovControl(covoptions='-S instance'), subdirhint='i_')
thistest.result()
Пример #4
0
import os.path

from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.gprutils import GPRswitches

# Analyze through gen.gpr with recursion enabled, declaring the library
# project as an explicit dependency of the generated root project.
mylib_gpr = os.path.abspath('mylib/mylib.gpr')
control = CovControl(
    deps=[mylib_gpr],
    gprsw=GPRswitches(root_project='gen.gpr', recursive=True))
TestCase().run(covcontrol=control)
thistest.result()
Пример #5
0
    dump = contents_of(dump)

    expected_warning = (
        'no unit {} in project gen (coverage.units attribute)'.format(no_such)
        if no_such else 'no unit of interest')

    thistest.fail_if(
        expected_warning not in dump,
        '[{}] missing warning on absence of ALI for unit'.format(label))


def check_empty(subdir, cov_control, no_such):
    """Switch to SUBDIR and exercise a project whose set of units of
    interest, per COV_CONTROL, ends up empty. NO_SUCH is forwarded to
    try_one_gpr to qualify the expected warning."""
    wd.to_subdir(subdir)
    try_one_gpr(
        gpr=gprfor(srcdirs="../src", mains="p.adb",
                   extra=cov_control.gpr()),
        no_such=no_such)


# Empty by specifying a single, non-existing unit in only
check_empty("wd_1", CovControl(units_in=["no_such_unit"]),
            no_such="no_such_unit")

# Empty by excluding the only candidate unit
check_empty("wd_2", CovControl(units_out=["p"]), no_such=None)

# Empty by including the empty set explicitly
check_empty("wd_3", CovControl(units_in=[]), no_such=None)
Пример #6
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest

# Have gnatcov locate the sources through --source-search instead of
# relying on the default lookup.
control = CovControl(covoptions="--source-search=../../src")
TestCase().run(control)
thistest.result()
Пример #7
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_tmp

# Mixing units and lists to include / exclude
wd = Wdir()
wd.to_subdir("wd_1")

# Include ops + ops.andthen, then exclude ops through a list file:
# only the child unit remains of interest.
TestCase(category=None).run(
    covcontrol=CovControl(
        units_in=["ops", "ops.andthen"],
        ulist_out=list_to_tmp(["ops"]),
        xreports=["ops-andthen.adb"]))

# Include ops + ops.andthen, then exclude the child directly: only
# the root unit remains of interest.
TestCase(category=None).run(
    covcontrol=CovControl(
        units_in=["ops", "ops.andthen"],
        units_out=["ops.andthen"],
        xreports=["ops.ads", "ops.adb"]))

thistest.result()
Пример #8
0
class SCOV_helper:
    """Helper class for source coverage activities."""

    # The differences between the different kinds of traces (binary or source)
    # are handled by specializing a few operations.

    def mode_build(self):
        """For a single test (not consolidation), build the program to run
        out of the test sources.
        """
        raise NotImplementedError

    def mode_execute(self, main):
        """Execute the program designated by MAIN, arranging to produce an
        execution trace. Return the name of a file containing the execution
        output.
        """
        raise NotImplementedError

    def mode_coverage_sco_options(self):
        """Return the gnatcov options to use to convey how SCOs should
        be retrieved at gnatcov coverage analysis time.
        """
        raise NotImplementedError

    def mode_tracename_for(self, pgm):
        """Return the base name of the execution trace file produced
        for program PGM.
        """
        raise NotImplementedError

    # --------------
    # -- __init__ --
    # --------------
    def __init__(self, testcase, drivers, xfile, xcovlevel, covctl, wdctl):
        """Prepare the assessment of TESTCASE: exercise the DRIVERS
        sources with --level=XCOVLEVEL, matching results against the
        expectations found in XFILE, under the control of the COVCTL
        (CovControl) and WDCTL (WdirControl) objects.
        """

        # The TESTCASE object that delegates the hard work to us :-)
        self.testcase = testcase

        # The set of DRIVER sources that we are to exercise. We use this
        # as a precise approximation of a set of main subprogram or local
        # executable names so care about basenames only:
        self.drivers = [os.path.basename(d) for d in drivers]

        # The "--level" argument we ought to use on gnatcov command lines:
        self.xcovlevel = xcovlevel

        # The CovControl object that controls aspects of our coverage
        # testing operations (project file dependencies, units for which
        # we expect reports to be produced, ...)
        self.covctl = covctl

        # Internal attributes: Directory where the instantiation takes place,
        # original expectations file, and base prefix of Working Directory
        # names

        self.homedir = os.getcwd() + "/"
        self.xfile = xfile

        # The WdirControl object telling about the Working and Binary
        # subdir prefixes we are to use:
        self.wdctl = wdctl

        # Compute the gnatcov coverage specific extra options that we'll have
        # to pass. We need these early for Xnote expansions.

        self.covoptions = ['--level=' + self.xcovlevel]
        if self.covctl:
            self.covoptions += to_list(self.covctl.covoptions)

        # Compute the list of test launch options strings that we need for
        # expectation CTL lines.

        ctl_opts = ['--trace-mode=%s' % thistest.options.trace_mode]

        self.extracargs = to_list(self.testcase.extracargs)

        # { sourcename -> KnoteDict } dictionaries of emitted/expected
        # line/report notes. We'll extract emitted notes from reports when we
        # know they have been produced. We extract expected notes from the
        # provided expectation file.

        # This needs to be done now, to make sure that we can register this
        # driver object with maximum details for qualification results before
        # run() is called, hence early wrt possible exception occurrences.

        self.elnotes = {}
        self.ernotes = {}

        xnotes = XnotesExpander(
            xfile=xfile,
            xcov_level=xcovlevel,
            ctl_opts=ctl_opts,
            ctl_cov=self.covoptions,
            ctl_cargs=gprbuild_cargs_with(thiscargs=self.extracargs),
            ctl_tags=thistest.options.tags,
            ctl_cons=[thistest.options.consolidate])
        self.xlnotes = xnotes.xlnotes
        self.xrnotes = xnotes.xrnotes

        # Even though we remember them here, we won't be looking at the
        # xlnotes if we're running for qualification.

        # Empty expectation sets here mean we have not a single source on
        # which anything will be checked. This can only be a mistake and would
        # just pass if we let the processing continue.

        thistest.fail_if(not self.xlnotes, "empty xlnotes from %s !!" % xfile)
        thistest.fail_if(not self.xrnotes, "empty xrnotes from %s !!" % xfile)

    def sources_of_interest(self):
        """List of sources for which we have expectations to match."""
        return self.xrnotes.keys()

    def units_of_interest(self):
        """Set of units for which we have expectations to match, based
        on the list of sources for which we have expectations and assuming
        standard use of '-' in filenames for child units or subunits
        (foo-bar.ads for package Foo.Bar).
        """
        return {
            no_ext(os.path.basename(soi)).replace('-', '.')
            for soi in self.sources_of_interest()
        }

    def programs(self):
        """List of base binary file names for the test drivers we are
        given to exercise.
        """
        return [no_ext(main) for main in self.drivers]

    # --------------------------
    # -- xcov_translation_for --
    # --------------------------
    def xcov_translation_for(self, source):
        """How a SOURCE reference in expectations translates as the basename
        of an =xcov annotated source file."""

        return source.replace('/', '-')

    # ----------------------------
    # -- report_translation_for --
    # ----------------------------
    def report_translation_for(self, source):
        """How a SOURCE reference in expectations translates in slocs
        found in =report outputs."""

        return os.sep.join(source.split('/'))

    # ----------------
    # -- singletest --
    # ----------------
    def singletest(self):
        """Whether SELF instantiates a single test."""
        return len(self.drivers) == 1

    # ---------
    # -- run --
    # ---------
    def run(self):
        """Evaluate source coverage as exercised by self.drivers"""

        self.log()

        # Whatever the kind of test, we get to a Working Directory and
        # switch back when done:
        self.to_workdir(self.rwdir())

        # If we are requested to convey units of interest through a project
        # file and don't have a coverage control object to obey, build one to
        # convey the units of interest:

        if thistest.options.gprmode and not self.covctl:
            self.covctl = CovControl(units_in=self.units_of_interest())

        # Assess whether we should be using a project file to convey units of
        # interest, either requested from the command line or for specific
        # test purposes:

        self.gprmode = (thistest.options.gprmode
                        or (self.covctl and self.covctl.requires_gpr()))

        # Compute our GPR now, which we will need for build of single tests
        # and/or analysis later on if in gprmode.  Turn inlining off for the
        # driver unit, so we exercise the functional code as separately
        # compiled, not as an inlined version of it in a non-representative
        # driver context.

        # Most of the tests with coverage control operate within
        # an extra subdir level
        this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

        self.gpr_obj_dir = 'obj'
        self.gpr = gprfor(
            mains=self.drivers,
            prjid="gen",
            objdir=self.gpr_obj_dir,
            srcdirs=["../" * n + "src" for n in range(1, this_depth)],
            exedir=self.abdir(),
            main_cargs="-fno-inline",
            langs=["Ada", "C"],
            deps=self.covctl.deps if self.covctl else [],
            extra=self.covctl.gpr() if self.covctl else "")

        # For single tests (no consolidation), we first need to build, then
        # to execute to get an execution trace.  All these we already have for
        # consolidation tests, and there's actually no need to build if we
        # were provided a bin directory to reuse:

        if self.singletest() and not self.wdctl.reuse_bin:
            self.mode_build()

        # Do gnatcov run now unless we're consolidating.  We'll just reuse
        # traces from previous executions in the latter case.

        if self.singletest():
            self.run_test(main=no_ext(self.drivers[0]))

        # At this point, we have everything we need for the analysis. Either
        # from the just done build+run in the single test case, or from
        # previous such sequences in the consolidation case.  Run gnatcov
        # coverage to get actual coverage reports and check against our
        # Xpectation specs.

        self.gen_xcov_reports()
        self.check_expectations()

        self.to_homedir()
        thistest.flush()

        # Let callers retrieve execution data at will
        return self

    # -------------------------
    # -- working directories --
    # -------------------------

    # Single tests (no consolidation) are each assigned a particular
    # working directory, named after their main unit with a prefix.

    # Consolidation tests are assigned a separate working directory as
    # well, and need access to each of the single directories to retrieve
    # execution traces and binaries.

    def rdir_for(self, base, main):
        """Relative path to Working or Binary Directory for single MAIN."""

        # Strip a possible "test_" prefix. This allows shortening pathnames
        # and the prefix is pointless in providing a unique temp dir.

        return base + main.replace("test_", "", 1) + "/"

    def rwdir_for(self, main):
        """Relative path to Working Directory for single MAIN."""

        return self.rdir_for(base=self.wdctl.wdbase, main=main)

    def rbdir_for(self, main):
        """Relative path to Binary Directory for single MAIN."""

        return self.rdir_for(base=self.wdctl.bdbase, main=main)

    def adir_for(self, rdir):
        """Absolute path from relative dir."""
        return self.homedir + rdir

    def awdir_for(self, main):
        """Absolute path to Working Directory for single MAIN."""
        return self.adir_for(self.rwdir_for(main))

    def abdir_for(self, main):
        """Absolute path to Binary Directory for single MAIN."""
        return self.adir_for(self.rbdir_for(main))

    def main(self):
        """Base name discriminating the Working/Binary directories for
        this test instance."""

        # For a single test, discriminate with driver basename. For a
        # consolidation test, discriminate with the expectation file basename.
        # We need the latter to allow multiple consolidation scenarios for a
        # testcase.

        return (no_ext(self.drivers[0])
                if self.singletest() else os.path.basename(no_ext(self.xfile)))

    def rwdir(self):
        """Relative path to Working Directory for current instance."""
        return self.rwdir_for(self.main())

    def awdir(self):
        """Absolute path to Working Directory for current instance."""
        return self.adir_for(self.rwdir())

    def rbdir(self):
        """Relative path to Binary Directory for current instance."""
        return self.rbdir_for(self.main())

    def abdir(self):
        """Absolute path to Binary Directory for current instance."""
        return self.adir_for(self.rbdir())

    # --------------
    # -- run_test --
    # --------------
    def run_test(self, main):
        """Execute the MAIN program to produce an execution trace, and
        trigger a failure if it raises an unhandled exception."""

        out_file = self.mode_execute(main=main)

        # Raw string: the pattern contains regex escapes such as \w.
        thistest.fail_if(
            match(
                r"(!!! EXCEPTION RAISED !!!"
                r"|raised [A-Z_]+ : [-._a-zA-Z]+:[0-9]+ \w+)", out_file),
            "exception raised while running '%s'." % main)

    # -------------------------
    # -- gen_one_xcov_report --
    # -------------------------
    def gen_one_xcov_report(self, inputs, format, options=""):
        """Helper for gen_xcov_reports, to produce one specific report for a
        particular FORMAT, from provided INPUTS. The command output is saved
        in a file named FORMAT.out."""

        # Compute the set of arguments we are to pass to gnatcov coverage.

        # When project files are used, force report output in the current
        # directory where it would be without a project file, and which the
        # project file might arbitrarily redirect otherwise. Doing this
        # conditionally prevents the gratuitous addition of command line
        # options which might be absent from the tool qualified interface
        # descriptions.

        covargs = ['--annotate=' + format, inputs
                   ] + (self.covoptions + to_list(options))

        if self.gprmode:
            covargs.append('--output-dir=.')

        # Run, latching standard output in a file so we can check contents on
        # return.

        ofile = format + ".out"
        p = xcov(args=['coverage'] + covargs, out=ofile)

        # Standard output might typically contain labeling warnings issued
        # by the static analysis phase, or error messages issued when a trace
        # indicates that some unlabeled edge was taken.  None of this should
        # happen so we simply fail as soon as the output file is not empty.
        # Note that we do this in qualification mode as well, even though what
        # we're looking at is not strictly part of the qualified interface.

        thistest.fail_if(
            os.path.getsize(ofile) > 0,
            "xcov standard output not empty (%s):\n--\n%s" %
            (ofile, contents_of(ofile)))

    # ----------------------
    # -- gen_xcov_reports --
    # ----------------------

    def force_xcov_report(self, source):
        """Make sure an =xcov report file exists for SOURCE, writing a
        dummy one when gnatcov produced none."""

        filename = self.xcov_translation_for(source) + '.xcov'

        if not os.path.exists(filename):
            report = open(filename, 'w')
            report.write("dummy xcov report")
            report.close()

    def gen_xcov_reports(self):
        """Generate the reports against which we will check expectation
        specs. Request the report format, saved as test.rep, and the xcov
        format (.ad?.xcov outputs) if we're not in qualification mode"""

        # Determine what options we are going to provide as the
        # assessment's inputs.

        # For a single driver, we always rely on a trace as input and we
        # produce a checkpoint for possible future consolidation if the
        # current execution mode calls for it:

        checkpoints = thistest.options.consolidate == 'checkpoints'

        single_driver = no_ext(self.drivers[0]) if self.singletest() else None

        use_checkpoint_inputs = checkpoints and not single_driver

        # We request traces as input with "@inputs.list", where the file
        # contains the list of traces to use, derived from the set of drivers.
        # We request checkpoints as inputs with "[email protected]",
        # where the file contains the list of checkpoints to use, derived
        # from the set of drivers as well:

        (input_opt, input_fn) = \
            ("--checkpoint=", ckptname_for) if use_checkpoint_inputs \
            else ("", self.mode_tracename_for)

        inputs = "%s@%s" % (input_opt,
                            list_to_file([
                                self.awdir_for(pgm) + input_fn(pgm)
                                for pgm in self.programs()
                            ], "inputs.list"))

        # Determine what command line options we'll pass to designate units of
        # interest and maybe produce a coverage checkpoint. We don't need and
        # don't want to pass SCO options when using checkpoints as inputs.

        sco_options = ([] if use_checkpoint_inputs else
                       self.mode_coverage_sco_options())

        save_checkpoint_options = ([
            "--save-checkpoint=%s" % ckptname_for(single_driver)
        ] if single_driver and checkpoints else [])

        # Now produce the --annotate=report format:

        self.gen_one_xcov_report(inputs,
                                 format="report",
                                 options=sco_options +
                                 save_checkpoint_options + ['-o', 'test.rep'])

        # Then an alternate .xcov output format, unless we are performing a
        # qualification run, for which that format isn't appropriate. No need
        # to regenerate a coverage checkpoint there - it would convey the same
        # as what the --annotate=report already produced if a checkpoint is
        # needed.

        if thistest.options.qualif_level:
            return

        self.gen_one_xcov_report(inputs, format="xcov", options=sco_options)

    # ------------------------------
    # -- check_unexpected_reports --
    # ------------------------------

    def check_unexpected_reports(self):
        """Check that we don't have unexpected reports or notes."""

        for s in self.ernotes:
            thistest.fail_if(
                self.covctl.unexpected(s),
                "report note found for %s, not in expected list" % s)

        for s in ls("*.xcov"):
            # Strip the ".xcov" extension only. str.rstrip(".xcov") would
            # strip any trailing run of the characters '.', 'x', 'c', 'o',
            # 'v' and mangle names such as "proc.adb.xcov".
            thistest.fail_if(
                self.covctl.unexpected(os.path.splitext(s)[0]),
                "%s report found, for source not in expected list" % s)

    # ------------------------
    # -- check_expectations --
    # ------------------------
    def check_expectations(self):
        """Match the emitted coverage results against our expectations,
        over all the sources of interest."""

        # Complain about report notes or xcov reports for unexpected
        # sources, when the list happens to be specified. We need the
        # source->emitted report notes expanded for this purpose.

        # Checking that we do have the expected reports will be performed by
        # the regular coverage expectation assessments triggered below.

        self.ernotes = RnotesExpander("test.rep").ernotes

        if self.covctl and self.covctl.xreports is not None:
            self.check_unexpected_reports()

        # When nothing of possible interest shows up for a unit, xcov
        # generates nothing at all. Create dummy reports here to prevent
        # fatal exceptions trying to open them downstream.

        for source in self.xrnotes:
            self.force_xcov_report(source)

        # Now expand the reports into source->emitted-notes dictionaries
        # and check against our per-source expectations.

        self.elnotes = LnotesExpander("*.xcov").elnotes

        # Compute a few things that we will need repeatedly over all the
        # sources with expectations to match

        # When we're running for a level stricter than the test category
        # (e.g. running a stmt test with --level=stmt+decision), we
        #
        # * Just ignore some emitted notes, simply irrelevant for the category
        #   (e.g. dT-, which doesn't change the statement coverage status of
        #   the outer statement). This is conveyed by the rp_?notes_for sets.
        #
        # * Accept that some emitted notes discharge expectations of some
        #   other kind as well. This is conveyed by the discharge_kdict values
        #   below.
        #
        # * This mechanism is also used to convey that all the relevant notes
        #   may "discharge" anti-expectations, which will cause a test failure
        #   when that happens.

        # Symbolic strength of each category and context level, to let us
        # determine when we're running some test of a given category with a
        # stricter --level

        strength = {
            CAT.stmt: 1,  # categories
            CAT.decision: 2,
            CAT.mcdc: 3,
            "stmt": 1,  # context levels
            "stmt+decision": 2,
            "stmt+mcdc": 3,
            "stmt+uc_mcdc": 3
        }

        stricter_level = (
            self.testcase.category
            and strength[self.xcovlevel] > strength[self.testcase.category])

        # For tests without a category, we will pick the relevant note
        # kinds from the strictest category possibly corresponding to the
        # xcov-level.

        strictest_cat_for = {
            "stmt": CAT.stmt,
            "stmt+decision": CAT.decision,
            "stmt+mcdc": CAT.mcdc
        }

        relevance_cat = (self.testcase.category if self.testcase.category else
                         strictest_cat_for[self.xcovlevel])

        # Setup our report and line discharging configurations (kinds of
        # emitted notes that are allowed to discharge other kinds of expected
        # notes), for =report and =xcov outputs.

        # =report outputs, stricter_level micro relaxations first:

        r_discharge_kdict = ({
            # let an emitted xBlock1 discharge an xBlock0 expectation, as an
            # extra exempted violations are most likely irrelevant for the
            # category
            xBlock0: [xBlock0, xBlock1]
        } if stricter_level else {})

        # Then augment with what is allowed to hit "0" or "0c" expectation
        # statements:

        r_discharge_kdict.update({
            r0: r_ern_for[relevance_cat],
            r0c: r_ern_for[relevance_cat]
        })

        # =xcov outputs, stricter_level micro relaxations only:

        l_discharge_kdict = ({
            # an emitted l! discharge an expected l+, when the l! is most
            # likely caused by irrelevant violations for the category
            lFullCov: [lFullCov, lPartCov],

            # an emitted lx1 discharge an lx0 expectation, when the extra
            # exempted violations are most likely caused by the level extra
            # strictness, hence irrelevant for the category
            lx0: [lx0, lx1]
        } if stricter_level else {})

        # Now process source by source, skipping those for which no report
        # is expected when the list happens to be specified

        for source in self.xrnotes:
            if not self.covctl or self.covctl.expected(source):
                self.check_expectations_over(
                    source=source,
                    relevance_cat=relevance_cat,
                    r_discharge_kdict=r_discharge_kdict,
                    l_discharge_kdict=l_discharge_kdict)

    def check_expectations_over(self, source, relevance_cat, r_discharge_kdict,
                                l_discharge_kdict):
        """Process expectations for a particular SOURCE, comparing
        expected coverage marks against what is found in the xcov reports
        for this source."""

        frame("Processing UX for %s" % (source), post=0, char='~').display()

        # Source names in expectations might still contain path indications
        # when they reach here, to indicate that the path components are
        # expected to be conveyed in the gnatcov results (slocs in =report
        # outputs and report file name for =xcov outputs).

        # Report notes checks

        strans = self.report_translation_for(source)
        _Xchecker(report='test.rep',
                  xdict=self.xrnotes.get(source),
                  rxp=r_rxp_for[relevance_cat],
                  edict=self.ernotes.get(strans, KnoteDict(erNoteKinds)),
                  ren=r_ern_for[relevance_cat]).run(r_discharge_kdict)

        # Line notes checks, meaningless if we're in qualification mode

        if thistest.options.qualif_level:
            return

        strans = self.xcov_translation_for(source)
        _Xchecker(report=strans + '.xcov',
                  xdict=self.xlnotes.get(source),
                  rxp=r_lxp_for[relevance_cat],
                  edict=self.elnotes.get(strans, KnoteDict(elNoteKinds)),
                  ren=r_eln_for[relevance_cat]).run(l_discharge_kdict)

    # ---------
    # -- log --
    # ---------
    def log(self):
        """Display a frame describing this coverage assessment: location,
        drivers, expectation file, category and coverage options."""
        frame("%s/ %s, %s\n%s coverage with %s" %
              (os.path.relpath(os.getcwd(), thistest.homedir),
               str([no_ext(main) for main in self.drivers]), self.xfile,
               self.testcase.category.name if self.testcase.category else
               "generic", ' '.join(self.covoptions)),
              char='*').display()

    # ----------------
    # -- to_workdir --
    # ----------------
    def to_workdir(self, wdir):
        """Switch to work directory WDIR, creating it if necessary. WDIR is
        expected to be either absolute or relative from the homedir."""

        self.to_homedir()
        mkdir(wdir)
        cd(wdir)

        thistest.log("Work directory: %s" % os.getcwd())

    # ----------------
    # -- to_homedir --
    # ----------------
    def to_homedir(self):
        """Switch to this test's homedir."""
        cd(self.homedir)
Пример #9
0
    def run(self):
        """Evaluate source coverage as exercised by self.drivers.

        Build and execute the drivers as needed, run gnatcov coverage
        to produce reports and match them against the expectation
        specs. Return self, so callers can retrieve execution data
        afterwards.
        """

        self.log()

        # Whatever the kind of test, we get to a Working Directory and
        # switch back when done:
        self.to_workdir(self.rwdir())

        # If we are requested to convey units of interest through a project
        # file and don't have a coverage control object to obey, build one to
        # convey the units of interest:

        if thistest.options.gprmode and not self.covctl:
            self.covctl = CovControl(units_in=self.units_of_interest())

        # Assess whether we should be using a project file to convey units of
        # interest, either requested from the command line or for specific
        # test purposes:

        self.gprmode = (thistest.options.gprmode
                        or (self.covctl and self.covctl.requires_gpr()))

        # Compute our GPR now, which we will need for build of single tests
        # and/or analysis later on if in gprmode.  Turn inlining off for the
        # driver unit, so we exercise the functional code as separately
        # compiled, not as an inlined version of it in a non-representative
        # driver context.

        # Most of the tests with coverage control operate within
        # an extra subdir level
        this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

        self.gpr_obj_dir = 'obj'
        self.gpr = gprfor(
            mains=self.drivers,
            prjid="gen",
            objdir=self.gpr_obj_dir,
            srcdirs=["../" * n + "src" for n in range(1, this_depth)],
            exedir=self.abdir(),
            main_cargs="-fno-inline",
            langs=["Ada", "C"],
            deps=self.covctl.deps if self.covctl else [],
            extra=self.covctl.gpr() if self.covctl else "")

        # For single tests (no consolidation), we first need to build, then
        # to execute to get an execution trace.  All these we already have for
        # consolidation tests, and there's actually no need to build if we
        # were provided a bin directory to reuse:

        if self.singletest() and not self.wdctl.reuse_bin:
            self.mode_build()

        # Do gnatcov run now unless we're consolidating.  We'll just reuse
        # traces from previous executions in the latter case.

        if self.singletest():
            self.run_test(main=no_ext(self.drivers[0]))

        # At this point, we have everything we need for the analysis. Either
        # from the just done build+run in the single test case, or from
        # previous such sequences in the consolidation case.  Run gnatcov
        # coverage to get actual coverage reports and check against our
        # Xpectation specs.

        self.gen_xcov_reports()
        self.check_expectations()

        self.to_homedir()
        thistest.flush()

        # Let callers retrieve execution data at will
        return self
Пример #10
0
def check(root_project, recurse, projects=None, units=None, xreports=None):
    """
    Check that running our test with

       -P`root_project`
       [--projects=... for `projects`]
       [--units=... for `units`]
       [--no-subprojects] (if `recurse` is False)

    we obtain reports for the units attached to the projects listed
    in `xreports`.

    If not None, `projects` and `xreports` are expected to be lists of
    shortcut names like 'boolops', 'intops' or 'counters'. This function
    takes care of converting them to relative project file names actually
    expected on the command line or in real project file dependencies.

    `root_project` may be either a .gpr filename, in which case it is used
    as-is, or a project short name.

    `recurse` None means "arrange not to pass any option influencing
    recursiveness".
    """

    project_names = to_list(projects)
    unit_names = to_list(units)

    # Resolve the -P argument for GPRswitches: a .gpr filename is used
    # as-is, a short name is mapped to its relative project file:
    if root_project.endswith('.gpr'):
        gprsw_root_project = root_project
    else:
        gprsw_root_project = _gpr_for(root_project)

    gprsw_projects = [_gpr_for(name) for name in project_names]

    gprsw_units = unit_names

    # Each check executes in its own tmp dir and passes a unique
    # --subdirs, preventing mixups across test variants within the
    # shared projects.

    # Name pieces for that dir, joined with '-' below. Start from
    # 'wd_foo' for .../.../foo.gpr or a project short name meant for -P:
    name_parts = ['wd_' + os.path.basename(root_project).split('.')[0]]

    # Add the first letter of each project name we will pass through
    # --projects, if any:
    if project_names:
        name_parts.append(''.join(name[0] for name in project_names))

    # Add a tag conveying the recursion request
    # (rt = True, rn = None, rf = False/other falsy):
    if recurse:
        name_parts.append('rt')
    elif recurse is None:
        name_parts.append('rn')
    else:
        name_parts.append('rf')

    tmpdir = '-'.join(name_parts)

    # For the --subdirs argument, relative to each subproject's object dir,
    # prepend our testcase local directory name:
    gprsw_subdirs = os.path.basename(os.getcwd()) + '_' + tmpdir

    # If a list of expected reports is provided, convert into list of
    # corresponding sources, which the CovControl class expects:

    if xreports is None:
        ctl_xreports = None
    else:
        ctl_xreports = []
        for name in xreports:
            ctl_xreports.extend(
                _xreports[name] if name in _xreports else [name])

    # Getting the default behavior wrt recursiveness consists
    # in requesting not to pass --no-subprojects.
    gprsw_no_subprojects = not recurse if recurse is not None else False

    wd = Wdir(clean=True)
    wd.to_subdir(tmpdir)

    TestCase(category=None).run(covcontrol=CovControl(

        # The programs we build and exercise alway depend on
        # the three subprojects:
        deps=[_gpr_for('boolops'),
              _gpr_for('intops'),
              _gpr_for('counters')],

        # What we analyse and check depends on our arguments:
        gprsw=GPRswitches(root_project=gprsw_root_project,
                          projects=gprsw_projects,
                          units=gprsw_units,
                          no_subprojects=gprsw_no_subprojects,
                          subdirs=gprsw_subdirs,
                          xvars=[('BOARD', env.target.machine)]),
        xreports=ctl_xreports,

        # The test driver and the likes are never of interest
        units_in=[]))

    wd.to_homedir()
Пример #11
0
from SCOV.tc import TestCase  # was missing: TestCase is used below
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir

wd = Wdir()

# Units = (In or All) - Out, order independent

# All (no In) is another testcase
# Out empty (In only) is another testcase

# Out overlaps In
wd.to_subdir("wd_1")
TestCase(category=None).run(
    covcontrol=CovControl(units_in=["ops", "ops.andthen"],
                          units_out=["ops.andthen"],
                          xreports=["ops.ads", "ops.adb"]))

TestCase(category=None).run(
    covcontrol=CovControl(units_in=["ops", "ops.andthen", "ops.orelse"],
                          units_out=["ops", "ops.andthen"],
                          xreports=["ops-orelse.adb"]))

# Out does not overlap In
wd.to_subdir("wd_2")

TestCase(category=None).run(
    covcontrol=CovControl(units_in=["ops", "ops.orelse"],
                          units_out=["ops.andthen"],
                          xreports=["ops.ads", "ops.adb", "ops-orelse.adb"]))

# Report overall status, like every sibling testcase does (was missing):
thistest.result()
Пример #12
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_tmp

wd = Wdir()

# Exercise exclusion lists against a fixed inclusion list, one subdir
# per variant: remove a parent unit, remove a child unit, then remove a
# unit that is not part of the inclusion list at all.
for subdir, excluded, expected in [
        ("wd_1", ["ops"], ["ops-andthen.adb"]),
        ("wd_2", ["ops.andthen"], ["ops.ads", "ops.adb"]),
        ("wd_3", ["ops.orelse"],
         ["ops.ads", "ops.adb", "ops-andthen.adb"]),
]:
    wd.to_subdir(subdir)
    TestCase(category=None).run(
        covcontrol=CovControl(
            ulist_in=list_to_tmp(["ops", "ops.andthen"]),
            ulist_out=list_to_tmp(excluded),
            xreports=expected))

thistest.result()
Пример #13
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest

# In binary trace mode, we need to help gnatcov find the source for
# stacks_g.ads, as no debug info references it (debug info is the only source
# of absolute paths for source files).
if thistest.options.trace_mode == 'bin':
    control = CovControl(covoptions="--source-search=../../src")
else:
    control = None

TestCase().run(control)
thistest.result()
Пример #14
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest


# Run the testcase with an entirely default coverage control object:
control = CovControl()
TestCase(category=None).run(covcontrol=control)
thistest.result()
Пример #15
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir

base_out = ['support', 'test_or_ft', 'test_and_tt', 'test_and_tf']
wd = Wdir()

# Exclude various unit subsets on top of the common base list, one
# subdir per variant, and check which reports remain: lone node unit,
# child units only, then root + one child.
for subdir, extra_out, expected in [
        ('wd_1', ['ops'], ['ops-andthen.adb', 'ops-orelse.adb']),
        ('wd_2', ['ops.orelse', 'ops.andthen'], ['ops.ads', 'ops.adb']),
        ('wd_3', ['ops', 'ops.andthen'], ['ops-orelse.adb']),
]:
    wd.to_subdir(subdir)
    TestCase(category=None).run(
        covcontrol=CovControl(units_out=base_out + extra_out,
                              xreports=expected))

thistest.result()
Пример #16
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir

wd = Wdir()

# Include various unit subsets, one subdir per variant, and check the
# reports we expect: lone node unit, node + child, then lone child.
for subdir, included, expected in [
        ('wd_1', ['ops'], ['ops.ads', 'ops.adb']),
        ('wd_2', ['ops', 'ops.andthen'],
         ['ops.ads', 'ops.adb', 'ops-andthen.adb']),
        ('wd_3', ['ops.andthen'], ['ops-andthen.adb']),
]:
    wd.to_subdir(subdir)
    TestCase(category=None).run(
        covcontrol=CovControl(units_in=included, xreports=expected))

thistest.result()
Пример #17
0
import os.path

from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.gprutils import GPRswitches

# The test programs depend on a separately built library project:
libdep = os.path.abspath('mylib/mylib.gpr')

control = CovControl(
    deps=[libdep],
    gprsw=GPRswitches(root_project='gen.gpr',
                      units=['foo', 'bar', 'klunk']))
TestCase().run(covcontrol=control)
thistest.result()
Пример #18
0
from SCOV.tc import TestCase
from SCOV.tctl import CovControl
from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_tmp

# Mixing units and lists to exclude: units_out carries the common base
# exclusions directly, while the per-variant exclusions go through a
# unit-list file built with list_to_tmp (ulist_out).
base_out = ["support", "test_or_ft", "test_and_tt", "test_and_tf"]

wd = Wdir()

# Check on lone node unit only: with "ops" excluded, only the child
# unit bodies are expected in the reports.
wd.to_subdir("wd_1")
TestCase(category=None).run(
    covcontrol=CovControl(units_out=base_out,
                          ulist_out=list_to_tmp(["ops"]),
                          xreports=["ops-andthen.adb", "ops-orelse.adb"]))

# Check on child units only: excluding both children leaves the root
# unit spec and body.
wd.to_subdir("wd_2")
TestCase(category=None).run(
    covcontrol=CovControl(units_out=base_out,
                          ulist_out=list_to_tmp(["ops.orelse", "ops.andthen"]),
                          xreports=["ops.ads", "ops.adb"]))

# Check on root + child unit: only the remaining child is expected.
wd.to_subdir("wd_3")
TestCase(category=None).run(
    covcontrol=CovControl(units_out=base_out,
                          ulist_out=list_to_tmp(["ops", "ops.andthen"]),
                          xreports=["ops-orelse.adb"]))