def gprbuild_cargs_with(thiscargs, scovcargs=True, suitecargs=True):
    """
    Build the complete list of cargs switches to hand over to gprbuild,
    per the gprbuild() documentation.

    For SUITECARGS, include both the language agnostic and the language
    specific testsuite switches (-cargs and -cargs:<lang>).
    """
    # Qualification runs exercise source coverage tests only; enforce the
    # assumptions that the production of qualification material relies on.
    if thistest.options.qualif_level:
        thistest.stop_if(
            not scovcargs,
            FatalError("SCOV_CARGS required for qualification test"))
        thistest.stop_if(
            thiscargs,
            FatalError("Specific CARGS forbidden for qualification test"))

    # Language specific cargs, from the testsuite command line first:
    lang_cargs = []

    if suitecargs:
        for language in KNOWN_LANGUAGES:
            suite_opts = to_list(thistest.suite_cargs_for(language))
            if suite_opts:
                lang_cargs.append("-cargs:%s" % language)
                lang_cargs.extend(suite_opts)

    # Then the cargs for SCOV based tests when requested. While these are
    # notionally language agnostic, they are only supported for a subset
    # of languages in practice and could be rejected for languages not in
    # this subset, so pass them per language as well.
    scov_opts = BUILDER.SCOV_CARGS(thistest.options)
    if scovcargs and scov_opts:
        for language in KNOWN_LANGUAGES:
            lang_cargs.append("-cargs:%s" % language)
            lang_cargs.extend(scov_opts)

    # Language agnostic cargs: testsuite -cargs plus those requested for
    # this specific run.
    other_cargs = []
    if suitecargs:
        other_cargs += to_list(thistest.suite_cargs_for(lang=None))
    other_cargs += to_list(thiscargs)

    if other_cargs:
        other_cargs = ['-cargs'] + other_cargs

    return lang_cargs + other_cargs
def __parse_lcx(self, image): """ Parse IMAGE as a string that contains a line expectation spec and return the corresponding LineCX object. """ # Extract the various parts of interest from the image m = re.match( string=image, pattern=r"\s*(?P<lre>=?/.*?/)\s+(?P<lnote>.*) ## (?P<rnotes>.*)") if m is None: raise FatalError("Invalid '%s' line expectation spec." "\nExpected /LRE/ lnotes ## rnotes" % image) lx_lre = m.group("lre") lre_override = lx_lre[0] == '=' if lx_lre[0] != '/': lx_lre = lx_lre[1:] lx_lre = lx_lre.strip('/') lx_lnote = XnoteP(text=self.__select_lnote(m.group("lnote")), stext=None) lx_rnotes = self.__parse_expected_rnotes(m.group("rnotes")) # If none of the stated report expectation applies to the current # xcov-level, default to the empty set: if not lx_rnotes: lx_rnotes = [XnoteP(text="0", stext=None)] # If we have both an empty-set expectation and something else, # expectations are wrong. This is a safeguard against a common # mistake, thinking, say, that "d=>dT-, 0" means "dT- if # --level=stmt+decision, 0 _otherwise_", while it means "dT- etc, 0 # _always_ (not tied to any particular level)" instead. else: thistest.stop_if( len(lx_rnotes) > 1 and "0" in lx_rnotes, FatalError("Contradictory =report expectation in %s" % image)) return LineCX(lre=lx_lre, lnp=lx_lnote, rnps=lx_rnotes, override=lre_override)
def gprbuild_cargs_with(thiscargs, scovcargs=True, suitecargs=True):
    """
    Compute and return the full list of cargs arguments to pass on
    gprbuild invocations, in accordance with the gprbuild() documentation.

    For SUITECARGS, include language agnostic and language specific
    switches (-cargs and -cargs:<lang>).
    """
    # Check assumptions made by the production of qualification material,
    # which runs source coverage tests only.
    if thistest.options.qualif_level:
        thistest.stop_if(
            not scovcargs,
            FatalError("SCOV_CARGS required for qualification test"))
        thistest.stop_if(
            thiscargs,
            FatalError("Specific CARGS forbidden for qualification test"))

    # Language specific cargs, all testsuite level:
    lang_cargs = []
    if suitecargs:
        for language in KNOWN_LANGUAGES:
            per_lang = to_list(thistest.suite_cargs_for(language))
            if per_lang:
                lang_cargs += ["-cargs:%s" % language] + per_lang

    # Language agnostic cargs: common ones + testsuite -cargs + those
    # requested for this specific run.
    other_cargs = []
    if scovcargs:
        other_cargs += BUILDER.SCOV_CARGS(thistest.options)
    if suitecargs:
        other_cargs += to_list(thistest.suite_cargs_for(lang=None))
    other_cargs += to_list(thiscargs)

    if other_cargs:
        other_cargs = ['-cargs'] + other_cargs

    return lang_cargs + other_cargs
def cmdrun(cmd, inp=None, out=None, err=None, register_failure=True):
    """
    Run the command+args list CMD, redirecting its input, output and error
    streams to INP, OUT and ERR respectively whenever these are not None.

    Stop with a FatalError if the execution status is not zero and
    REGISTER_FAILURE is True. Return the process descriptor otherwise.
    """
    # Collect the Run input/output/error arguments for which a
    # non-default value is requested.
    kwargs = {}
    for key, value in (('input', inp), ('output', out), ('error', err)):
        if value:
            kwargs[key] = value

    p = run_and_log(cmd, timeout=thistest.options.timeout, **kwargs)

    thistest.stop_if(
        register_failure and p.status != 0,
        FatalError('"%s"' % ' '.join(cmd) + ' exit in error',
                   outfile=out, outstr=p.out))
    return p
def srctracename_for(pgmname, register_failure=True):
    """
    Return the name of the source trace file for the given program name.

    Since source trace file names are not predictable, we need to produce
    the source trace file first and then look for a file matching a
    pattern to find it.

    If we find zero or multiple traces and "register_failure" is True,
    this stops the testcase. If "register_failure" is False, we just
    return None in that case.
    """
    pattern = srctrace_pattern_for(pgmname)
    trace_files = glob.glob(pattern)

    if len(trace_files) == 1:
        return trace_files[0]
    elif register_failure:
        # Zero or several candidates with failure registration requested:
        # stop the testcase.
        thistest.stop(
            FatalError("Exactly one trace expected matching:"
                       f"\n {pattern}"
                       f"\nBut got {len(trace_files)} traces instead"))
    else:
        return None
def __category_from_dir(self):
    """Compute test category from directory location."""
    # TEST_DIR may use '\' separators on Windows; canonicalize them to
    # '/' so the matching pattern below works regardless of the host
    # (same scheme as the other variant of this helper in the testsuite).
    test_dir = TEST_DIR.replace('\\', '/')
    for cat in CAT.critcats:
        # Require the category name to be a complete path component.
        if re.search(r"/%s(/|$)" % cat.name, test_dir):
            return cat
    raise FatalError(
        "Unable to infer test category from subdir '%s'" % TEST_DIR)
def check_block_on(self, tline):
    # Track block nesting from markers found on TLINE.

    # Opening marker: push a new block nested within the current one.
    if self.blopen_p(tline):
        self.current_block = Block(parent=self.current_block)

    # Closing marker: there must be an open block; pop back to its parent.
    if self.blclose_p(tline):
        thistest.stop_if(
            not self.current_block,
            FatalError("end of nonexistant block at\n=> " + tline.text))
        self.current_block = self.current_block.parent
def __category_from_dir(self):
    """Compute test category from directory location."""
    test_dir = thistest.reldir

    # Return the first critical category whose name shows up as a
    # subdirectory component.
    hit = next(
        (cat for cat in CAT.critcats
         if re.search(r"/%s" % cat.name, test_dir)),
        None)
    if hit is not None:
        return hit

    raise FatalError(
        "Unable to infer test category from subdir '%s'" % test_dir)
def __category_from_dir(self):
    """Compute test category from directory location."""
    # Canonicalize directory separators to simplify the matching logic
    test_dir = TEST_DIR.replace('\\', '/')

    hit = next(
        (cat for cat in CAT.critcats
         if re.search(r"/%s" % cat.name, test_dir)),
        None)
    if hit is not None:
        return hit

    raise FatalError(
        "Unable to infer test category from subdir '%s'" % TEST_DIR)
def __init__(self, extradrivers="", extracargs="", category=CAT.auto):
    """
    Set up this testcase object.

    EXTRADRIVERS: extra driver sources to exercise, in addition to those
    probed from src/.

    EXTRACARGS: extra compilation arguments for this testcase, added to
    what --cargs provided on the testsuite command line.

    CATEGORY: test category; CAT.auto requests automatic detection from
    the testcase subdirectory name.
    """
    # By default, these test cases expect no error from subprocesses (xrun,
    # xcov, etc.)
    self.expect_failures = False

    # Step 1: Compute the list of drivers and consolidation specs
    # to exercise
    # -----------------------------------------------------------

    # Drivers first. Probe all those from src/ plus those explicitly
    # provided. If that is empty, see if we have bodies aimed at being
    # exercised by common drivers up-tree. Abort if there's nothing to
    # exercise at all
    self.all_drivers = []
    self.__expand_drivers("src/test_*.adb src/test_*.c " + extradrivers)
    if len(self.all_drivers) == 0:
        self.__expand_shared_controllers(drivers=True, cspecs=False)

    thistest.stop_if(
        len(self.all_drivers) == 0,
        FatalError("Request to exercise empty test_set"))

    # Seek consolidation specs, then. Similar scheme, local check first,
    # then seek shared entities
    self.all_cspecs = []
    self.__expand_cspecs("src/cons_*.txt")
    if len(self.all_cspecs) == 0:
        self.__expand_shared_controllers(drivers=False, cspecs=True)

    # Step 2: Determine a few test parameters common to all drivers
    # -------------------------------------------------------------

    # - test category:
    #
    #   If automatic detection from subdir was requested, do that.
    #   Otherwise, use the provided argument, which might be None or a
    #   criterion related value.
    self.category = (self.__category_from_dir()
                     if category == CAT.auto
                     else category)

    # - extra compilation arguments, added to what --cargs was provided to
    #   the testsuite command line:
    self.extracargs = extracargs

    # Step 3: Setup qualification data for this testcase
    # --------------------------------------------------
    self.qdata = Qdata(tcid=TEST_DIR)
def instanciate_over(self, tline, block, kind): thisni = Xnote(xnp=self.notep, block=block, kind=kind) # Register matches for Segments corresponding to all the instances of # the subtext we find, possibly extended. Error out if too few or too # many. # Compute a base subtext to start from and whether we should extend or # not. base = self.stext if self.stext.startswith('(') and self.stext.endswith('*)'): base = self.stext[0:-2] extend = True else: extend = False # As we use RE services to find matches for the string, and their # position in the line, we need to escape it to make sure any special # character is not considered as part of a regexp expression. base = re.escape(base) for bm in re.finditer(pattern=base, string=tline.text): thisni.register_match( Segment( tline.lno, bm.start() + 1, self.__extended_segend_for(bm=bm, tline=tline) if extend else bm.end())) thistest.stop_if( thisni.nmatches == 0, FatalError("couldn't find subtext '%s' in line '%s'" % (self.stext, tline.text))) thistest.stop_if( thisni.nmatches > 1, FatalError("multiple matches of subtext '%s' in line '%s'" % (self.stext, tline.text))) return thisni
def gprbuild(project, scovcargs=True, suitecargs=True, extracargs=None, gargs=None, largs=None, trace_mode=None, runtime_project=None, out='gprbuild.out', register_failure=True): """ Cleanup & build the provided PROJECT file using gprbuild, passing GARGS/CARGS/LARGS as gprbuild/cargs/largs command-line switches. Each of these arguments may be either None, a list of options, or a string containing a space-separated list of options. SCOVCARGS tell whether or not we should prepend BUILDER.SCOV_CARGS to the -cargs switches. SUITECARGS tells whether or not we should also add the -cargs passed on the testsuite toplevel command line. See gprbuild_gargs_with for the meaning of TRACE_MODE and RUNTIME_PROJECT. OUT is the name of the file to contain gprbuild's output. Stop with a FatalError if the execution status is not zero and REGISTER_FAILURE is True. Return the process descriptor otherwise. """ # Fetch options, from what is requested specifically here # or from command line requests all_gargs = gprbuild_gargs_with( thisgargs=gargs, trace_mode=trace_mode, runtime_project=runtime_project, ) all_largs = gprbuild_largs_with(thislargs=largs) all_cargs = gprbuild_cargs_with(scovcargs=scovcargs, suitecargs=suitecargs, thiscargs=extracargs) # Now cleanup, do build and check status thistest.cleanup(project) args = (to_list(BUILDER.BASE_COMMAND) + ['-P%s' % project] + all_gargs + all_cargs + all_largs) p = run_and_log(args, output=out, timeout=thistest.options.timeout) if register_failure: thistest.stop_if(p.status != 0, FatalError("gprbuild exit in error", out)) return p
def instanciate_over(self, tline, block, kind):
    # The Xnote we will populate with segment matches on TLINE.
    thisni = Xnote(xnp=self.notep, block=block, kind=kind)

    # Register matches for Segments corresponding to all the instances of
    # the subtext we find, possibly extended. Error out if too few or too
    # many.

    # Compute a base subtext to start from and whether we should extend or
    # not. A "(...*)"-style subtext requests extension.
    if self.stext.startswith('(') and self.stext.endswith('*)'):
        base = self.stext[0:-2]
        extend = True
    else:
        base = self.stext
        extend = False

    # As we use RE services to find occurrences of the base string and
    # their position in the line, escape the whole of it — not just a
    # possible opening paren — so that no character is mistakenly taken
    # as a regexp metacharacter (same scheme as the escaped variant of
    # this method elsewhere in the testsuite).
    base = re.escape(base)

    for bm in re.finditer(pattern=base, string=tline.text):
        # Segment columns are 1-based, hence the +1 on the match start.
        thisni.register_match(
            Segment(
                tline.lno, bm.start() + 1,
                self.__extended_segend_for(bm=bm, tline=tline) if extend
                else bm.end()))

    # Exactly one match is expected; complain otherwise.
    thistest.stop_if(
        thisni.nmatches == 0,
        FatalError("couldn't find subtext '%s' in line '%s'"
                   % (self.stext, tline.text)))
    thistest.stop_if(
        thisni.nmatches > 1,
        FatalError("multiple matches of subtext '%s' in line '%s'"
                   % (self.stext, tline.text)))

    return thisni
def __init__(self, text, stext=None, stag=None):
    """
    Build an expected-note pattern from TEXT, with optional subtext STEXT
    and separation tag STAG.
    """
    # WEAK conveys whether it is ok (not triggering test failure) for
    # expectations produced by this pattern not to be discharged by an
    # emitted note. A leading '~' on TEXT requests weakness.
    self.weak = text[0] == '~'
    if self.weak:
        text = text[1:]

    # KIND is the kind of note this expectation stands for
    self.kind = self.NK_for[text]

    # STEXT is a substring in matching source lines where the sloc of an
    # emitted note must fall to discharge. For example:
    #
    #        6 - 10
    #        v    v
    #    4:  if Cond1 and then Cond2 then  -- # expr
    #
    #    /expr/ c!:"Cond1"
    #
    # yields "Cond1" in stext meaning that we must find an emitted note
    # with a sloc pointing somewhere within col 6 and col 10 to discharge
    # the expectation for line 4 here.
    self.stext = stext

    # We could require and use stext to store expected justification text
    # for exemptions. We don't handle that as of today, so this check is
    # deliberately disabled by the leading "False and".
    thistest.stop_if(
        False and self.stext is None and self.kind in xNoteKinds,
        FatalError("expected justification text required for %s" % text))

    # STAG is the separation tag that we must find on an emitted note to
    # discharge expectations produced from this pattern. Initially, at
    # this __init__ point, this is set with the stag text found.
    self.stag = stag

    # Setup our instanciation factory now, which lets us perform the
    # required test only once:
    self.factory = (
        _XnoteP_block(notep=self) if block_p(self.kind)
        else _XnoteP_line(notep=self) if not self.stext
        else _XnoteP_segment(notep=self, stext=stext))
def gprinstall(project, prefix=None):
    """
    Run "gprinstall" on the provided project file.

    :param None|str prefix: If a string is passed, add `--prefix=PREFIX`
        to the gprinstall command-line.
    """
    out_file = 'gprinstall.out'

    # Assemble the command line, with the optional installation prefix.
    argv = ['gprinstall', '-P', project, '-p']
    if prefix:
        argv += ['--prefix={}'.format(prefix)]

    process = run_and_log(
        argv, output=out_file, timeout=thistest.options.timeout)
    thistest.stop_if(
        process.status != 0,
        FatalError('gprinstall exit in error', out_file))
def __select_srlist_from(self, candlists):
    """
    Search and return the one good list of units amongst the candidate
    lists we have.
    """
    goodlists = []
    for candidate in candlists:
        self.__examine_source_list(candidate, goodlists)

    # Exactly one candidate is expected to qualify.
    thistest.stop_if(
        len(goodlists) != 1,
        FatalError("goodlists = %d, != 1 for %s"
                   % (len(goodlists), candlists)))

    return goodlists[0]
def __select_lnote(self, text):
    """
    Decode TEXT to return the line note for the current coverage level.
    """
    # Build a level -> note table from the comma separated alternatives;
    # later alternatives override earlier ones for the same level.
    level_table = {}
    for alternative in (alt.strip() for alt in text.split(',')):
        for level, note in self.__decode_note_choice(alternative):
            level_table[level] = note

    # Prefer an exact level match, then an unconditional ('') entry.
    if self.xcov_level in level_table:
        return level_table[self.xcov_level]
    if '' in level_table:
        return level_table['']

    raise FatalError(
        "Missing line expectation choice for level %s in %s"
        % (self.xcov_level, text))
def __init__(self, sref, LXset): self.LXset = LXset # dictionaries of expected line and report notes for our unit self.xldict = KnoteDict(xlNoteKinds) self.xrdict = KnoteDict(xrNoteKinds) self.current_block = None self.current_srules = {} self.tfile = Tfile(filename=sref.spath, process=self.process_tline) self.sref = sref thistest.stop_if(self.current_block, FatalError("fuzz block still open at EOF"))
def gprbuild(project, scovcargs=True, suitecargs=True, extracargs=None,
             gargs=None, largs=None, trace_mode=None, out='gprbuild.out'):
    """
    Cleanup & build the provided PROJECT file using gprbuild, passing
    GARGS/CARGS/LARGS as gprbuild/cargs/largs command-line switches. Each
    of these arguments may be either None, a list of options, or a string
    containing a space-separated list of options.

    SCOVCARGS tell whether or not we should prepend BUILDER.SCOV_CARGS to
    the -cargs switches.

    SUITECARGS tells whether or not we should also add the -cargs passed
    on the testsuite toplevel command line.

    OUT is the name of the file to contain gprbuild's output.
    """
    # Assemble the full option lists from what is requested specifically
    # here and from the testsuite command line.
    all_gargs = gprbuild_gargs_with(thisgargs=gargs, trace_mode=trace_mode)
    all_largs = gprbuild_largs_with(thislargs=largs)
    all_cargs = gprbuild_cargs_with(thiscargs=extracargs,
                                    scovcargs=scovcargs,
                                    suitecargs=suitecargs)

    # Clean up, then run the build and check its status.
    thistest.cleanup(project)
    cmd = (to_list(BUILDER.BASE_COMMAND)
           + ['-P%s' % project]
           + all_gargs + all_cargs + all_largs)
    p = run_and_log(cmd, output=out, timeout=thistest.options.timeout)
    thistest.stop_if(p.status != 0,
                     FatalError("gprbuild exit in error", out))
def __sloc_for(m): name = m.group(0) # We expect exactly one match for a name so could arrange to stop # at the first we find. Having multiple matches can happen out of # a test-writer's mistake though, and keeping only one arbitrarily # would cause endless confusion so we search them all and issue an # error as needed. # We expect to compare only against line numbers later on, so just # stash a dummy column number here, required to form a regular # Sloc still. slocs = [ "%s:%d:0" % (os.path.basename(sp), tl.lno) for sp in idict for tl in idict[sp] if name in tl.text ] thistest.stop_if( len(slocs) != 1, FatalError("%d slocs found for stag %s" % (len(slocs), name))) return slocs[0]
def __decode_note_choice(self, text):
    """
    Given a note_choice that depends potentially on a list of coverage
    levels, return a list of (xcov-level, expected-note-text) tuples that
    represent those dependences.

    For instance, given:

        'u => l!' or 'sd => l+'

    ...this function will return:

        [('stmt+uc_mcdc', 'l!')]
        or [('stmt', 'l+'), ('stmt+decision', 'l+')]
    """
    level_from_char = {
        "s": "stmt",
        "d": "stmt+decision",
        "m": "stmt+mcdc",
        "u": "stmt+uc_mcdc",
    }

    parts = text.split("=>")

    if len(parts) == 1:
        # No "=>": the note applies unconditionally.
        return [("", text)]

    if len(parts) > 2:
        # Parse error
        raise FatalError("Note choice %s contains more than one arrow"
                         % text)

    # One level character per requested level, all mapping to the same
    # note text.
    note = parts[1].lstrip(' ')
    return [(level_from_char[lchar], note)
            for lchar in parts[0].rstrip(' ')]
def categorize(self, qda):
    """
    Attach QDA to the first of our categories that accepts it; fatal
    error if none does.
    """
    for candidate in self.categories:
        if candidate.trymatch(qda):
            return

    raise FatalError(
        comment="unable to categorize testcase %s" % qda.tcid)
def __parse_groups_from(self, scovdata):
    """
    First level of group parsing, stopping prior to XnoteP instantiations
    to allow name -> sloc resolution in between.

    SCOVDATA is the sequence of expectation lines to process. Return the
    list of UXgroup objects built along the way.
    """
    uxgroups = []

    # We start a new group everytime we see a "sources" line (which starts
    # with '#', after comment markers were stripped).
    current_uxg = None

    # Whether we are currently grabbing LX lines; CTL lines may toggle
    # this off and back on.
    grabbing = True

    # Track the last LCX we grabbed, so we can process continuation
    # requests
    lastlx = None

    for line in scovdata:
        ctl_update, ctl_value = self.__try_ctl_update_from(line)

        if ctl_update:
            # A CTL line was found, update our processing state
            # accordingly:
            if ctl_value is None:
                current_uxg = None
            else:
                grabbing = ctl_value

        elif grabbing and line.startswith('+#'):
            # A continuation line, to add rnotes that didn't fit
            # on the previous ones.
            lastlx.rnps.extend(self.__parse_expected_rnotes(line[3:]))

        elif line.startswith('#'):
            # A new group starts. Close the current one first and start
            # grabbing again unconditionally:
            if current_uxg is not None:
                uxgroups.append(self.__end_parse_on(current_uxg))
            current_uxg = UXgroup(candlists=self.__parse_sources(line))
            grabbing = True

        elif grabbing and line.startswith(('/', '=')):
            # This must be an LX line. Check lre overriding and add to the
            # set attached to the current group consistency.
            lx = self.__parse_lcx(line)
            thistest.stop_if(
                lx.lre in current_uxg.lxset and not lx.override,
                FatalError("LRE dup without overriding note (%s, %s)"
                           % (lx.lre, self.xfile)))
            current_uxg.lxset[lx.lre] = lx
            lastlx = lx

        else:
            # Not grabbing or regular comment. Just ignore.
            pass

    # We're done with all the lines. Close the current group, if any
    if current_uxg is not None:
        uxgroups.append(self.__end_parse_on(current_uxg))

    return uxgroups