Example no. 1
    def __parse_lcx(self, image):
        """
        Parse IMAGE as a string that contains a line expectation spec and
        return the corresponding LineCX object.
        """

        # Extract the various parts of interest from the image

        m = re.match(
            string=image,
            pattern=r"\s*(?P<lre>=?/.*?/)\s+(?P<lnote>.*) ## (?P<rnotes>.*)")

        if m is None:
            raise FatalError("Invalid '%s' line expectation spec."
                             "\nExpected /LRE/ lnotes ## rnotes" % image)

        lx_lre = m.group("lre")

        lre_override = lx_lre[0] == '='
        if lx_lre[0] != '/':
            lx_lre = lx_lre[1:]

        lx_lre = lx_lre.strip('/')

        lx_lnote = XnoteP(text=self.__select_lnote(m.group("lnote")),
                          stext=None)

        lx_rnotes = self.__parse_expected_rnotes(m.group("rnotes"))

        # If none of the stated report expectations applies to the current
        # xcov-level, default to the empty set:

        if not lx_rnotes:
            lx_rnotes = [XnoteP(text="0", stext=None)]

        # If we have both an empty-set expectation and something else,
        # expectations are wrong. This is a safeguard against a common
        # mistake, thinking, say, that "d=>dT-, 0" means "dT- if
        # --level=stmt+decision, 0 _otherwise_", while it means "dT- etc, 0
        # _always_ (not tied to any particular level)" instead.

        else:
            thistest.stop_if(
                len(lx_rnotes) > 1 and "0" in lx_rnotes,
                FatalError("Contradictory =report expectation in %s" % image))

        return LineCX(lre=lx_lre,
                      lnp=lx_lnote,
                      rnps=lx_rnotes,
                      override=lre_override)
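
For reference, here is a minimal standalone sketch of the split performed by the regexp above, on a hypothetical expectation line (the "l!" and "dT-" note texts are just illustrative placeholders):

import re

image = "/expr/ l! ## dT-"
m = re.match(
    string=image,
    pattern=r"\s*(?P<lre>=?/.*?/)\s+(?P<lnote>.*) ## (?P<rnotes>.*)")

print(m.group("lre"))     # /expr/
print(m.group("lnote"))   # l!
print(m.group("rnotes"))  # dT-
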
Example no. 2
    def __init__(self, extradrivers="", extracargs="", category=CAT.auto):
        # By default, these test cases expect no error from subprocesses (xrun,
        # xcov, etc.)
        self.expect_failures = False

        # Step 1: Compute the list of drivers and consolidation specs
        #         to exercise
        # ------------------------------------------------------------

        # Drivers first. Probe all those from src/ plus those explicitly
        # provided. If that is empty, see if we have bodies aimed at being
        # exercised by common drivers up-tree. Abort if there's nothing to
        # exercise at all.
        self.all_drivers = []
        self.__expand_drivers("src/test_*.adb src/test_*.c " + extradrivers)

        if not self.all_drivers:
            self.__expand_shared_controllers(drivers=True, cspecs=False)

        thistest.stop_if(not self.all_drivers,
                         FatalError("Request to exercise empty test_set"))

        # Seek consolidation specs, then. Similar scheme, local check first,
        # then seek shared entities
        self.all_cspecs = []
        self.__expand_cspecs("src/cons_*.txt")

        if not self.all_cspecs:
            self.__expand_shared_controllers(drivers=False, cspecs=True)

        # Step 2: Determine a few test parameters common to all drivers
        # -------------------------------------------------------------

        # - test category:
        #
        # If automatic detection from subdir was requested, do that.
        # Otherwise, use the provided argument, which might be None or a
        # criterion-related value.
        self.category = (self.__category_from_dir()
                         if category == CAT.auto else category)

        # - extra compilation arguments, added to whatever --cargs were
        #   provided on the testsuite command line:
        self.extracargs = extracargs

        # Step 3: Setup qualification data for this testcase
        # --------------------------------------------------

        self.qdata = Qdata(tcid=TEST_DIR)
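
The driver expansion step above works from space-separated glob patterns. As a hedged sketch of what such an expansion amounts to, assuming nothing more than the standard library (__expand_drivers itself is harness-specific and not reproduced here):

import glob

def expand_patterns(patterns):
    # Hypothetical helper: expand a space-separated list of glob patterns
    # into the matching filenames, in pattern order.
    result = []
    for pattern in patterns.split():
        result.extend(glob.glob(pattern))
    return result

# expand_patterns("src/test_*.adb src/test_*.c") -> list of driver sources
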
Example no. 3
    def __init__(self, text, stext=None, stag=None):

        # WEAK conveys whether it is ok (not triggering test failure) for
        # expectations produced by this pattern not to be discharged by an
        # emitted note.

        self.weak = text[0] == '~'
        if self.weak:
            text = text[1:]

        # KIND is the kind of note this expectation stands for

        self.kind = self.NK_for[text]

        # STEXT is a substring in matching source lines where the sloc of an
        # emitted note must fall to discharge the expectation. For example:
        #
        #         6 - 10
        #         v   v
        #  4:  if Cond1 and then Cond2 then  -- # expr
        #
        #  /expr/  c!:"Cond1"
        #
        # yields "Cond1" in stext, meaning that we must find an emitted note
        # with a sloc pointing somewhere between columns 6 and 10 to discharge
        # the expectation for line 4 here.

        self.stext = stext

        # We could require and use stext to store expected justification text
        # for exemptions. We don't handle that as of today.

        thistest.stop_if(
            False and self.stext is None and self.kind in xNoteKinds,
            FatalError("expected justification text required for %s" % text))

        # STAG is the separation tag that we must find on an emitted note to
        # discharge expectations produced from this pattern. Initially, at
        # this __init__ point, it is set to the stag text that was found.

        self.stag = stag

        # Set up our instantiation factory now, which lets us perform the
        # required test only once:

        self.factory = (
            _XnoteP_block(notep=self) if block_p(self.kind)
            else _XnoteP_line(notep=self) if not self.stext
            else _XnoteP_segment(notep=self, stext=stext))
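
The chained conditional expression selects one of three factory kinds exactly once, at construction time. A small standalone illustration of the same selection pattern, with stand-in classes (the real _XnoteP_* factories are defined elsewhere in the suite):

class BlockFactory: pass
class LineFactory: pass
class SegmentFactory: pass

def pick_factory(is_block, stext):
    # Same shape as above: block notes first, then plain line notes,
    # then segment notes when a subtext is provided.
    return (BlockFactory() if is_block
            else LineFactory() if not stext
            else SegmentFactory())
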
Example no. 4
    def __select_srlist_from(self, candlists):
        """
        Search and return the one good list of units amongst the candidates we
        have.
        """

        goodlists = []
        for slist in candlists:
            self.__examine_source_list(slist, goodlists)

        thistest.stop_if(
            len(goodlists) != 1,
            FatalError("goodlists = %d, != 1 for %s" %
                       (len(goodlists), candlists)))

        return goodlists[0]
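
The shape here is "filter, then insist on exactly one survivor". A generic sketch of that check, with a hypothetical predicate standing in for __examine_source_list:

def select_single(candidates, is_good):
    # Keep the good candidates and insist that exactly one remains
    # (hypothetical generic form of the check above).
    good = [c for c in candidates if is_good(c)]
    if len(good) != 1:
        raise ValueError(
            "goodlists = %d, != 1 for %s" % (len(good), candidates))
    return good[0]
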
Example no. 5
    def __init__(self, sref, LXset):

        self.LXset = LXset

        # dictionaries of expected line and report notes for our unit

        self.xldict = KnoteDict(xlNoteKinds)
        self.xrdict = KnoteDict(xrNoteKinds)

        self.current_block = None
        self.current_srules = {}

        self.tfile = Tfile(filename=sref.spath, process=self.process_tline)

        self.sref = sref

        thistest.stop_if(self.current_block,
                         FatalError("fuzz block still open at EOF"))
Example no. 6
def gprinstall(project, prefix=None):
    """
    Run "gprinstall" on the provided project file.

    :param None|str prefix: If a string is passed, add `--prefix=PREFIX` to the
        gprinstall command-line.
    """
    ofile = 'gprinstall.out'
    args = ['gprinstall', '-P', project, '-p']
    if prefix:
        args.append('--prefix={}'.format(prefix))

    # Add mandatory options, such as target and RTS info
    args.extend(thistest.gprconfoptions)
    args.extend(thistest.gprvaroptions)

    p = run_and_log(args, output=ofile, timeout=thistest.options.timeout)
    thistest.stop_if(p.status != 0,
                     FatalError('gprinstall exit in error', ofile))
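
Outside the testsuite harness, an equivalent invocation could be sketched with the standard library only. This assumes gprinstall is on PATH; run_and_log, thistest and the gprconf/gprvar option lists are harness-specific and omitted:

import subprocess

def gprinstall_sketch(project, prefix=None):
    args = ['gprinstall', '-P', project, '-p']
    if prefix:
        args.append('--prefix={}'.format(prefix))
    with open('gprinstall.out', 'w') as ofile:
        p = subprocess.run(args, stdout=ofile, stderr=subprocess.STDOUT)
    if p.returncode != 0:
        raise RuntimeError('gprinstall exit in error')
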
Example no. 7
    def instanciate_over(self, tline, block, kind):

        thisni = Xnote(xnp=self.notep, block=block, kind=kind)

        # Register matches for Segments corresponding to all the instances of
        # the subtext we find, possibly extended.  Error out if too few or too
        # many.

        # Compute a base subtext to start from and whether we should extend or
        # not.

        base = self.stext
        if self.stext.startswith('(') and self.stext.endswith('*)'):
            base = self.stext[0:-2]
            extend = True
        else:
            extend = False

        # As we use RE services to find matches for the string and their
        # position in the line, we need to escape it so that special
        # characters are not interpreted as regexp metacharacters.
        base = re.escape(base)
        for bm in re.finditer(pattern=base, string=tline.text):
            thisni.register_match(
                Segment(
                    tline.lno,
                    bm.start() + 1,
                    self.__extended_segend_for(bm=bm, tline=tline)
                    if extend else bm.end()))

        thistest.stop_if(
            thisni.nmatches == 0,
            FatalError("couldn't find subtext '%s' in line '%s'" %
                       (self.stext, tline.text)))

        thistest.stop_if(
            thisni.nmatches > 1,
            FatalError("multiple matches of subtext '%s' in line '%s'" %
                       (self.stext, tline.text)))

        return thisni
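
A standalone illustration of the matching step: escape the subtext, then record a 1-based column segment for every occurrence found in the line (the caller above rejects anything but exactly one match):

import re

line = "   if Cond1 and then Cond1 then"
subtext = "Cond1"

# re.escape makes sure metacharacters in the subtext are matched literally.
segments = [(m.start() + 1, m.end())
            for m in re.finditer(re.escape(subtext), line)]
print(segments)  # two matches here, which the caller would reject
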
Example no. 8
    def instanciate_over(self, tline, block, kind):

        thisni = Xnote(xnp=self.notep, block=block, kind=kind)

        # Register matches for Segments corresponding to all the instances of
        # the subtext we find, possibly extended.  Error out if too few or too
        # many.

        # Compute a base subtext to start from and whether we should extend or
        # not. If we should, include the opening paren as part of the base,
        # escaped as we're going to use RE services to test for multiple
        # occurrences.

        if self.stext.startswith('(') and self.stext.endswith('*)'):
            base = '\\' + self.stext[0:-2]
            extend = True
        else:
            base = self.stext
            extend = False

        for bm in re.finditer(pattern=base, string=tline.text):
            thisni.register_match(
                Segment(
                    tline.lno,
                    bm.start() + 1,
                    self.__extended_segend_for(bm=bm, tline=tline)
                    if extend else bm.end()))

        thistest.stop_if(
            thisni.nmatches == 0,
            FatalError("couldn't find subtext '%s' in line '%s'" %
                       (self.stext, tline.text)))

        thistest.stop_if(
            thisni.nmatches > 1,
            FatalError("multiple matches of subtext '%s' in line '%s'" %
                       (self.stext, tline.text)))

        return thisni
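
Unlike the previous variant, this one only escapes the opening parenthesis by hand. The escape matters because an unescaped '(' opens a regexp group; a quick illustration, on a made-up extended subtext:

import re

stext = "(Cond1 *)"            # hypothetical extended-subtext spec
base = '\\' + stext[0:-2]      # "\(Cond1 " : the paren is matched literally

# Without the backslash, '(' would start a capturing group, so the match
# would begin after the parenthesis and the segment start would be wrong.
print([m.start() for m in re.finditer(base, "if (Cond1 and then Cond2)")])
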
Example no. 9
def gprbuild(project,
             scovcargs=True,
             suitecargs=True,
             extracargs=None,
             gargs=None,
             largs=None,
             trace_mode=None,
             out='gprbuild.out'):
    """
    Cleanup & build the provided PROJECT file using gprbuild, passing
    GARGS/EXTRACARGS/LARGS as gprbuild/cargs/largs command-line switches.
    Each of these arguments may be either None, a list of options, or a
    string containing a space-separated list of options.

    SCOVCARGS tells whether or not we should prepend BUILDER.SCOV_CARGS to
    the -cargs switches.

    SUITECARGS tells whether or not we should also add the -cargs passed on
    the testsuite toplevel command line.

    OUT is the name of the file to contain gprbuild's output.
    """

    # Fetch options, from what is requested specifically here
    # and from command-line requests
    all_gargs = gprbuild_gargs_with(thisgargs=gargs, trace_mode=trace_mode)
    all_largs = gprbuild_largs_with(thislargs=largs)
    all_cargs = gprbuild_cargs_with(scovcargs=scovcargs,
                                    suitecargs=suitecargs,
                                    thiscargs=extracargs)

    # Now cleanup, do build and check status
    thistest.cleanup(project)

    args = (to_list(BUILDER.BASE_COMMAND) + ['-P%s' % project] + all_gargs +
            all_cargs + all_largs)
    p = run_and_log(args, output=out, timeout=thistest.options.timeout)
    thistest.stop_if(p.status != 0, FatalError("gprbuild exit in error", out))
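
GARGS/EXTRACARGS/LARGS may each be None, a list, or a space-separated string; a hedged sketch of the normalization a to_list helper could perform (the actual helper lives elsewhere in the suite):

def to_list(arg):
    # Hypothetical sketch: None -> [], string -> split on whitespace,
    # anything else -> plain list copy.
    if arg is None:
        return []
    if isinstance(arg, str):
        return arg.split()
    return list(arg)

# to_list(None) == [], to_list("-g -O0") == ["-g", "-O0"]
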
Example no. 10
        def __sloc_for(m):
            name = m.group(0)

            # We expect exactly one match for a name, so we could arrange to
            # stop at the first one we find. Multiple matches can happen out
            # of a test-writer's mistake though, and keeping only one
            # arbitrarily would cause endless confusion, so we search them
            # all and issue an error as needed.

            # We expect to compare only against line numbers later on, so just
            # stash a dummy column number here, which is still required to
            # form a regular Sloc.

            slocs = [
                "%s:%d:0" % (os.path.basename(sp), tl.lno) for sp in idict
                for tl in idict[sp] if name in tl.text
            ]

            thistest.stop_if(
                len(slocs) != 1,
                FatalError("%d slocs found for stag %s" % (len(slocs), name)))

            return slocs[0]
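
A standalone sketch of the sloc construction above, over a hypothetical dict mapping source paths to simple line objects:

import os
from collections import namedtuple

Tline = namedtuple("Tline", "lno text")

idict = {"src/ops.adb": [Tline(4, "procedure Swap is"),
                         Tline(9, "procedure Reset is")]}

name = "Swap"
slocs = ["%s:%d:0" % (os.path.basename(sp), tl.lno)
         for sp in idict for tl in idict[sp] if name in tl.text]
print(slocs)  # ['ops.adb:4:0']
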
Example no. 11
    def __parse_groups_from(self, scovdata):
        """
        First level of group parsing, stopping prior to XnoteP instantiations
        to allow name -> sloc resolution in between.
        """

        uxgroups = []

        # We start a new group every time we see a "sources" line (which
        # starts with '#', after comment markers were stripped).

        current_uxg = None

        grabbing = True

        # Track the last LCX we grabbed, so we can process continuation
        # requests

        lastlx = None

        for line in scovdata:

            ctl_update, ctl_value = self.__try_ctl_update_from(line)

            if ctl_update:

                # A CTL line was found, update our processing state
                # accordingly:

                if ctl_value is None:
                    current_uxg = None
                else:
                    grabbing = ctl_value

            elif grabbing and line.startswith('+#'):

                # A continuation line, to add rnotes that didn't fit
                # on the previous ones.

                lastlx.rnps.extend(self.__parse_expected_rnotes(line[3:]))

            elif line.startswith('#'):

                # A new group starts. Close the current one first and start
                # grabbing again unconditionally:

                if current_uxg is not None:
                    uxgroups.append(self.__end_parse_on(current_uxg))

                current_uxg = UXgroup(candlists=self.__parse_sources(line))
                grabbing = True

            elif grabbing and line.startswith(('/', '=')):

                # This must be an LX line. Check LRE overriding consistency
                # and add it to the set attached to the current group.

                lx = self.__parse_lcx(line)

                thistest.stop_if(
                    lx.lre in current_uxg.lxset and not lx.override,
                    FatalError("LRE dup without overriding note (%s, %s)" %
                               (lx.lre, self.xfile)))

                current_uxg.lxset[lx.lre] = lx
                lastlx = lx

            else:

                # Not grabbing, or a regular comment. Just ignore.
                pass

        # We're done with all the lines. Close the current group, if any

        if current_uxg is not None:
            uxgroups.append(self.__end_parse_on(current_uxg))

        return uxgroups
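
To illustrate the dispatch above, here is a hypothetical (made-up) expectations fragment together with the branch each line would take; CTL lines are left out since their exact syntax is handled by __try_ctl_update_from:

scovdata = [
    "# ops.adb",         # '#'  : closes the previous group, starts a new one
    "/expr/ l! ## dT-",  # '/'  : LX line, parsed by __parse_lcx
    "+# d!",             # '+#' : continuation, extends lastlx.rnps
    "=/expr/ l+ ## 0",   # '='  : LX line overriding the previous /expr/ LRE
]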