def __init__(self, sources, options="", execs=None, alis=None,
             ensure_dcscos=True):
    """
    Remember the build/analysis items for this object.

    SOURCES is the list (or whitespace separated string) of source file
    names. OPTIONS is a raw option string. EXECS and ALIS, when provided,
    override the executable and .ali file lists otherwise inferred from
    SOURCES. ENSURE_DCSCOS states whether DC SCOs presence should be
    enforced (stored as-is; consumed elsewhere).
    """
    self.sources = to_list(sources)
    self.options = options
    self.ensure_dcscos = ensure_dcscos

    # Infer default list of executables and alis from list of sources.
    # Fix: use "is not None" identity tests instead of "!= None"
    # equality, per standard Python idiom (PEP 8).
    if execs is not None:
        self.execs = to_list(execs)
    else:
        self.execs = [exename_for(source.split('.')[0])
                      for source in self.sources]

    if alis is not None:
        self.alis = to_list(alis)
    else:
        # ALI files are expected in the "obj" subdirectory, named after
        # each source's base name.
        self.alis = [os.path.join("obj", "%s.ali" % source.split('.')[0])
                     for source in self.sources]
def gprbuild_cargs_with(thiscargs, scovcargs=True, suitecargs=True):
    """
    Build the complete list of cargs related switches to pass on gprbuild
    invocations, in accordance with the gprbuild() documentation.

    With SUITECARGS, include both the language agnostic and the language
    specific testsuite switches (-cargs and -cargs:<lang>).
    """
    # Qualification material is produced from source coverage tests only;
    # enforce the assumptions that production relies on.
    if thistest.options.qualif_level:
        thistest.stop_if(
            not scovcargs,
            FatalError("SCOV_CARGS required for qualification test"))
        thistest.stop_if(
            thiscargs,
            FatalError("Specific CARGS forbidden for qualification test"))

    # Language specific cargs, testsuite level ones first:
    lang_cargs = []
    if suitecargs:
        for language in KNOWN_LANGUAGES:
            suite_lang_cargs = to_list(thistest.suite_cargs_for(language))
            if suite_lang_cargs:
                lang_cargs.append("-cargs:%s" % language)
                lang_cargs.extend(suite_lang_cargs)

    # Then the SCOV specific cargs if requested. Although notionally
    # language agnostic, these are only supported for a subset of the
    # languages in practice and could be rejected for languages outside
    # that subset, hence the per-language passing.
    SCOV_CARGS = BUILDER.SCOV_CARGS(thistest.options)
    if scovcargs and SCOV_CARGS:
        for language in KNOWN_LANGUAGES:
            lang_cargs.append("-cargs:%s" % language)
            lang_cargs.extend(SCOV_CARGS)

    # Language agnostic cargs: testsuite level -cargs plus the ones
    # requested for this specific run.
    other_cargs = []
    if suitecargs:
        other_cargs += to_list(thistest.suite_cargs_for(lang=None))
    other_cargs += to_list(thiscargs)
    if other_cargs:
        other_cargs = ['-cargs'] + other_cargs

    return lang_cargs + other_cargs
def gprbuild_largs_with(thislargs):
    """
    Return the complete list of -largs gprbuild switches to pass,
    merging the testsuite level requests with THISLARGS.
    """
    largs = to_list(thistest.options.largs) + to_list(thislargs)
    return ['-largs'] + largs if largs else largs
def gprbuild_cargs_with(thiscargs, scovcargs=True, suitecargs=True):
    """
    Build the complete list of cargs arguments to pass on gprbuild
    invocations, in accordance with the gprbuild() documentation.

    With SUITECARGS, include the language agnostic and the language
    specific switches (-cargs and -cargs:<lang>).
    """
    # Qualification material is produced from source coverage tests
    # only; check the assumptions that production relies on.
    if thistest.options.qualif_level:
        thistest.stop_if(
            not scovcargs,
            FatalError("SCOV_CARGS required for qualification test"))
        thistest.stop_if(
            thiscargs,
            FatalError("Specific CARGS forbidden for qualification test"))

    # Language specific cargs, all testsuite level (inlined here rather
    # than via a nested helper):
    lang_cargs = []
    if suitecargs:
        for language in KNOWN_LANGUAGES:
            per_lang = to_list(thistest.suite_cargs_for(language))
            if per_lang:
                lang_cargs += ["-cargs:%s" % language] + per_lang

    # Language agnostic cargs: common ones + testsuite -cargs + those
    # for this specific run.
    other_cargs = []
    if scovcargs:
        other_cargs += BUILDER.SCOV_CARGS(thistest.options)
    if suitecargs:
        other_cargs += to_list(thistest.suite_cargs_for(lang=None))
    other_cargs += to_list(thiscargs)
    if other_cargs:
        other_cargs.insert(0, '-cargs')

    return lang_cargs + other_cargs
def gprbuild_gargs_with(thisgargs, trace_mode=None):
    """
    Compute and return all the toplevel gprbuild arguments to pass,
    accounting for specific requests in THISGARGS.
    """
    trace_mode = trace_mode or thistest.options.trace_mode

    # A few forced switches, useful for practical reasons and without
    # influence on code generation:
    gargs = ['-f',               # always rebuild
             '-XSTYLE_CHECKS=',  # style checks off
             '-p']               # create missing directories (obj, typically)

    # Testsuite configuration options (selecting target model and board
    # essentially), then the caller's specific requests:
    gargs += thistest.gprconfoptions
    gargs += thistest.gprvaroptions
    gargs += to_list(thisgargs)

    # With source instrumentation, add the dependency on gnatcov_full_rts
    # so that instrumented programs are compilable in the generated
    # projects.
    if trace_mode == 'src':
        gargs.append('--implicit-with={}.gpr'.format(
            RUNTIME_INFO.gnatcov_rts_project))

    return gargs
def gprbuild_largs_with(thislargs):
    """
    Compute and return the full set of -largs gprbuild switches,
    combining testsuite level requests with THISLARGS.
    """
    largs = to_list(thistest.options.largs) + to_list(thislargs)

    # Windows toolchains produce position independent executables by
    # default, which gnatcov does not handle; request non-PIE links when
    # operating in binary trace mode there.
    on_windows = thistest.env.build.os.name == 'windows'
    if on_windows and thistest.options.trace_mode == 'bin':
        largs.append('-no-pie')

    if largs:
        largs = ['-largs'] + largs
    return largs
def xcov(args, out=None, err=None, inp=None, env=None, register_failure=True,
         auto_config_args=True, auto_target_args=True):
    """
    Run xcov with arguments ARGS, timeout control, valgrind control if
    available and enabled, output directed to OUT and failure registration
    if REGISTER_FAILURE is True. Return the process status descriptor.
    ARGS may be a list or a whitespace separated string.

    See xcov_suite_args for the meaning of AUTO_*_ARGS arguments.
    """
    # Normalize ARGS into a list; the requested sub-command always comes
    # first, the remainder are its arguments:
    args = to_list(args)
    covcmd, covargs = args[0], args[1:]
    covargs = xcov_suite_args(
        covcmd, covargs, auto_config_args, auto_target_args) + covargs

    # The program we actually launch is "gnatcov <cmd>" unless the
    # testsuite designates a specific program for this sub-command:
    covpgm = thistest.suite_covpgm_for(covcmd)
    if covpgm is None:
        covpgm = maybe_valgrind([XCOV]) + [covcmd]
    else:
        covpgm = [covpgm]

    # Execute, check status, raise on error and return otherwise.
    #
    # The gprvar options are only needed for the "libsupport" part of our
    # projects. They are pointless wrt coverage run or analysis activities
    # so we don't include them here.
    p = cmdrun(cmd=covpgm + covargs, inp=inp, out=out, err=err, env=env,
               register_failure=register_failure)

    if thistest.options.enable_valgrind == 'memcheck':
        memcheck_log = contents_of(MEMCHECK_LOG)
        thistest.fail_if(
            memcheck_log,
            'MEMCHECK log not empty'
            '\nFROM "%s":'
            '\n%s' % (' '.join(covpgm + covargs), memcheck_log))

    return p
def gprbuild(project, scovcargs=True, suitecargs=True, extracargs=None,
             gargs=None, largs=None, trace_mode=None, runtime_project=None,
             out='gprbuild.out', register_failure=True):
    """
    Cleanup & build the provided PROJECT file using gprbuild, passing
    GARGS/CARGS/LARGS as gprbuild/cargs/largs command-line switches. Each
    of these arguments may be either None, a list of options, or a string
    containing a space-separated list of options.

    SCOVCARGS tell whether or not we should prepend BUILDER.SCOV_CARGS to
    the -cargs switches.

    SUITECARGS tells whether or not we should also add the -cargs passed
    on the testsuite toplevel command line.

    See gprbuild_gargs_with for the meaning of TRACE_MODE and
    RUNTIME_PROJECT.

    OUT is the name of the file to contain gprbuild's output.

    Stop with a FatalError if the execution status is not zero and
    REGISTER_FAILURE is True. Return the process descriptor otherwise.
    """
    # Gather the switch groups, from what is requested specifically here
    # or from command line requests:
    toplevel_args = gprbuild_gargs_with(
        thisgargs=gargs,
        trace_mode=trace_mode,
        runtime_project=runtime_project,
    )
    linker_args = gprbuild_largs_with(thislargs=largs)
    compiler_args = gprbuild_cargs_with(scovcargs=scovcargs,
                                        suitecargs=suitecargs,
                                        thiscargs=extracargs)

    # Cleanup, then build and check status:
    thistest.cleanup(project)
    full_command = (to_list(BUILDER.BASE_COMMAND)
                    + ['-P%s' % project]
                    + toplevel_args + compiler_args + linker_args)
    p = run_and_log(full_command, output=out,
                    timeout=thistest.options.timeout)
    if register_failure:
        thistest.stop_if(p.status != 0,
                         FatalError("gprbuild exit in error", out))
    return p
def gprbuild_gargs_with(thisgargs):
    """
    Compute and return all the toplevel gprbuild arguments to pass,
    accounting for specific requests in THISGARGS.
    """
    # A few forced switches, useful for practical reasons and without
    # influence on code generation, followed by our testsuite
    # configuration options (selecting target model and board
    # essentially) and the caller's specific requests.
    forced = ['-f',               # always rebuild
              '-XSTYLE_CHECKS=',  # style checks off
              '-p']               # create missing directories (obj, typically)
    return (forced
            + thistest.gprconfoptions
            + thistest.gprvaroptions
            + to_list(thisgargs))
def xrun(args, out=None, env=None, register_failure=True,
         auto_config_args=True, auto_target_args=True):
    """
    Run <xcov run> with arguments ARGS for the current target, performing
    operations only relevant to invocations intended to execute a program
    (for example, requesting a limit on the output trace size).
    """
    # qemu can mysteriously misbehave when its input is a terminal, so
    # always provide a dummy input file instead.
    nulinput = "devnul"
    touch(nulinput)

    # Possibly augment the arguments to pass:
    #
    # * --kernel on the testsuite command line translates as --kernel to
    #   gnatcov run;
    #
    # * --trace-size-limit on the testsuite command line adds to the
    #   -eargs passed to gnatcov run for cross targets running with an
    #   emulator. Mind that the incoming arguments might already end
    #   with -eargs.
    runargs = []

    if thistest.options.kernel:
        runargs.append('--kernel=' + thistest.options.kernel)

    runargs += to_list(args)

    opts = thistest.options
    if opts.trace_size_limit and opts.target and not opts.board:
        if '-eargs' not in runargs:
            runargs.append('-eargs')
        runargs += ["-exec-trace-limit", opts.trace_size_limit]

    return xcov(['run'] + runargs, inp=nulinput, out=out, env=env,
                register_failure=register_failure,
                auto_config_args=auto_config_args,
                auto_target_args=auto_target_args)
def gprbuild_gargs_with(thisgargs, trace_mode=None, runtime_project=None):
    """
    Compute and return all the toplevel gprbuild arguments to pass,
    accounting for specific requests in THISGARGS.

    If TRACE_MODE is "src", consider that we are building an instrumented
    project even if the testsuite mode tells otherwise. If RUNTIME_PROJECT
    is not null, use it as the name of the instrumentation runtime project
    in source trace mode.
    """
    trace_mode = trace_mode or thistest.options.trace_mode

    # Start from a few forced switches, useful for practical reasons and
    # without influence on code generation, then add our testsuite
    # configuration options (selecting target model and board
    # essentially) and the caller's specific requests:
    gargs = ['-f',               # always rebuild
             '-XSTYLE_CHECKS=',  # style checks off
             '-p']               # create missing directories (obj, typically)
    gargs += thistest.gprconfoptions
    gargs += thistest.gprvaroptions
    gargs += to_list(thisgargs)

    # With source instrumentation, depend on the instrumentation runtime
    # project so that instrumented programs are compilable in the
    # generated projects, and use the instrumented sources found in the
    # "*-gnatcov-instr" object directories.
    if trace_mode == 'src':
        rts = runtime_project or RUNTIME_INFO.gnatcov_rts_project
        gargs.append(f"--implicit-with={rts}.gpr")
        gargs.append("--src-subdirs=gnatcov-instr")

    return gargs
def gprbuild(project, scovcargs=True, suitecargs=True, extracargs=None,
             gargs=None, largs=None, trace_mode=None, out='gprbuild.out'):
    """
    Cleanup & build the provided PROJECT file using gprbuild, passing
    GARGS/CARGS/LARGS as gprbuild/cargs/largs command-line switches. Each
    of these arguments may be either None, a list of options, or a string
    containing a space-separated list of options.

    SCOVCARGS tell whether or not we should prepend BUILDER.SCOV_CARGS to
    the -cargs switches.

    SUITECARGS tells whether or not we should also add the -cargs passed
    on the testsuite toplevel command line.

    OUT is the name of the file to contain gprbuild's output.

    Stop with a FatalError if the execution status is not zero. Return
    the process descriptor otherwise.
    """
    # Fetch options, from what is requested specifically here
    # or from command line requests
    all_gargs = gprbuild_gargs_with(thisgargs=gargs, trace_mode=trace_mode)
    all_largs = gprbuild_largs_with(thislargs=largs)
    all_cargs = gprbuild_cargs_with(scovcargs=scovcargs,
                                    suitecargs=suitecargs,
                                    thiscargs=extracargs)

    # Now cleanup, do build and check status
    thistest.cleanup(project)
    args = (to_list(BUILDER.BASE_COMMAND) + ['-P%s' % project] +
            all_gargs + all_cargs + all_largs)
    p = run_and_log(args, output=out, timeout=thistest.options.timeout)
    thistest.stop_if(p.status != 0,
                     FatalError("gprbuild exit in error", out))

    # Fix: return the process descriptor so callers can inspect the
    # build execution, consistent with the other gprbuild variant in
    # this codebase (backward compatible — callers ignoring the result
    # are unaffected).
    return p
def mode_build(self):
    # Instrumentation comes first, with proper selection of the units of
    # interest. We have no LI file at hand, so we expect to provide this
    # through a project file:
    assert self.gprmode

    # Honor a request for specific options when there is one; fall back
    # to the already computed project file for this test otherwise:
    instrument_gprsw = (
        self.covctl.gprsw if self.covctl and self.covctl.gprsw
        else GPRswitches(root_project=self.gpr))

    out = 'xinstr.out'
    xcov_instrument(
        covlevel=self.xcovlevel,
        isi_file=self.ISI_FILE,
        extra_args=to_list(self.covctl.covoptions) if self.covctl else [],
        gprsw=instrument_gprsw,
        gpr_obj_dir=self.gpr_obj_dir,
        out=out)

    # Instrumentation issues would surface as warnings on standard
    # output. None should occur, so fail as soon as the output file is
    # not empty.
    thistest.fail_if(
        os.path.getsize(out) > 0,
        'xcov instrument standard output not empty ({}):'
        '\n--'
        '\n{}'.format(out, contents_of(out)))

    # Build, instructing gprbuild to fetch the instrumented sources in
    # their dedicated subdir:
    gprbuild(self.gpr,
             extracargs=self.extracargs,
             gargs='--src-subdirs=gnatcov-instr')
def gen_one_xcov_report(self, inputs, format, options=""):
    """Helper for gen_xcov_reports: produce one specific report for a
    particular FORMAT, from provided INPUTS. The command output is saved
    in a file named FORMAT.out."""

    # Arguments to pass to gnatcov coverage. When project files are
    # used, force report output in the current directory where it would
    # be without a project file, and which the project file might
    # arbitrarily redirect otherwise. Doing this conditionally prevents
    # the gratuitous addition of command line options which might be
    # absent from the tool qualified interface descriptions.
    covargs = (['--annotate=' + format, inputs]
               + self.covoptions + to_list(options))
    if self.gprmode:
        covargs.append('--output-dir=.')

    # Latch standard output in a file so contents can be checked on
    # return:
    ofile = format + ".out"
    p = xcov(args=['coverage'] + covargs, out=ofile)

    # Standard output might typically contain labeling warnings issued
    # by the static analysis phase, or error messages issued when a
    # trace indicates that some unlabeled edge was taken. None of this
    # should happen, so fail as soon as the output file is not empty —
    # in qualification mode as well, even though what we're looking at
    # is not strictly part of the qualified interface.
    thistest.fail_if(
        os.path.getsize(ofile) > 0,
        "xcov standard output not empty (%s):\n--\n%s" % (
            ofile, contents_of(ofile)))
def __expand_cspecs(self, patterns):
    """Add to the list of consolidation specs to exercize the set of
    files corresponding to every glob pattern in PATTERNS."""
    for pattern in to_list(patterns):
        self.all_cspecs += ls(pattern)
def __expand_drivers(self, patterns):
    """Add to the list of drivers to exercize the set of files
    corresponding to every glob pattern in PATTERNS."""
    for pattern in to_list(patterns):
        self.all_drivers += ls(pattern)
def run(self):
    """Evaluate source coverage as exercised by self.drivers"""

    self.log()

    # Whatever the kind of test, we get to a Working Directory and
    # switch back when done:
    self.to_workdir(self.rwdir())

    # Compute our GPR now, which we will need for build of single tests
    # and/or analysis later on if in gprmode. Turn inlining off for the
    # driver unit, so we exercise the functional code as separately
    # compiled, not as an inlined version of it in a non-representative
    # driver context.

    # Most of the tests with coverage control operate within
    # an extra subdir level
    this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

    self.gpr = gprfor(
        mains=self.drivers, prjid="gen",
        srcdirs=["../" * n + "src" for n in range(1, this_depth)],
        exedir=self.abdir(), main_cargs="-fno-inline",
        langs=["Ada", "C"],
        deps=self.covctl.deps if self.covctl else (),
        extra=self.covctl.gpr() if self.covctl else "")

    # For single tests (no consolidation), we first need to build,
    # producing the binary to execute and the ALIs files, then to
    # gnatcov run to get an execution trace. All these we already have
    # for consolidation tests, and there's actually no need to build if
    # we were provided a bin directory to reuse:
    if self.singletest() and not self.wdctl.reuse_bin:
        gprbuild(self.gpr, extracargs=self.extracargs)

    # Compute the gnatcov command line argument we'll pass to convey
    # the set of scos to operate upon. Note that we need these for
    # both gnatcov run and gnatcov coverage.
    thistest.gprmode = (thistest.options.gprmode
                        or (self.covctl and self.covctl.requires_gpr()))

    # SCO conveyance: explicit options from coverage control when
    # provided, -P<gpr> in gprmode, --scos=@<response file of alis>
    # otherwise.
    self.scoptions = (
        to_list(self.covctl.scoptions)
        if (self.covctl and self.covctl.scoptions)
        else ["-P%s" % self.gpr]
        if thistest.gprmode
        else ["--scos=@%s" % list_to_file(self.ali_list(), "alis.list")])

    # Remember which of these indicate the use of project files, which
    # might influence default output dirs for example.
    self.gproptions = [
        opt for opt in self.scoptions if opt.startswith("-P")
    ]

    # Do gnatcov run now unless we're consolidating. We'll just reuse
    # traces from previous executions in the latter case.
    if self.singletest():
        self.xcov_run(no_ext(self.drivers[0]))

    # At this point, we have everything we need for the analysis.
    # Either from the just done build+run in the single test case, or
    # from previous such sequences in the consolidation case. Run
    # gnatcov coverage to get actual coverage reports and check against
    # our Xpectation specs.
    self.gen_xcov_reports()
    self.check_expectations()

    self.to_homedir()
    thistest.flush()

    # Let callers retrieve execution data at will
    return self
def do(command):
    """
    Execute COMMAND. Abort and dump output on failure. Return output
    otherwise.
    """
    return cmdrun(cmd=to_list(command), register_failure=True).out
def gprfor(mains, prjid="gen", srcdirs="src", objdir=None, exedir=".",
           main_cargs=None, langs=None, deps=(), compiler_extra="",
           extra=""):
    """
    Generate a simple PRJID.gpr project file to build executables for
    each main source file in the MAINS list, sources in SRCDIRS.
    Inexistant directories in SRCDIRS are ignored. Assume the set of
    languages is LANGS when specified; infer from the mains otherwise.
    Add COMPILER_EXTRA, if any, at the end of the Compiler package
    contents. Add EXTRA, if any, at the end of the project file
    contents. Return the gpr file name.
    """
    # "with" clauses for the requested project dependencies:
    deps = '\n'.join('with "%s";' % dep for dep in deps)

    mains = to_list(mains)
    srcdirs = to_list(srcdirs)
    langs = to_list(langs)

    # Fetch the support project file template
    template = contents_of(os.path.join(ROOT_DIR, "template.gpr"))

    # Instanciate the template fields.

    # Turn the list of main sources into the proper comma separated
    # sequence of string literals for the Main GPR attribute.
    gprmains = ', '.join('"%s"' % m for m in mains)

    # Likewise for source dirs. Filter on existence, to allow widening
    # the set of tentative dirs while preventing complaints from
    # gprbuild about inexistent ones. Remove a lone trailing comma,
    # which happens when none of the provided dirs exists and would
    # produce an invalid gpr file.
    srcdirs = ', '.join('"%s"' % d for d in srcdirs if os.path.exists(d))
    srcdirs = srcdirs.rstrip(', ')

    # Determine the language(s) from the mains.
    languages_l = langs or set(language_info(main).name for main in mains)
    languages = ', '.join('"%s"' % l for l in languages_l)

    # The base project file we need to extend, and the way to refer to
    # it from the project contents. This provides a default last chance
    # handler on which we rely to detect termination on exception
    # occurrence.
    basegpr = (("%s/support/base" % ROOT_DIR)
               if control.need_libsupport() else None)

    # If we have specific flags for the mains, append them. This is
    # typically something like:
    #
    #   for Switches("test_blob.adb") use
    #     Compiler'Default_Switches("Ada") & ("-fno-inline")
    compswitches = (
        '\n'.join([
            'for Switches("%s") use \n'
            ' Compiler\'Default_Switches ("%s") & (%s);' %
            (main, language_info(main).name,
             ','.join(['"%s"' % carg for carg in to_list(main_cargs)]))
            for main in mains
        ]) + '\n')

    # Now instanciate, dump the contents into the target gpr file and
    # return
    gprtext = template % {
        'prjname': prjid,
        'extends': ('extends "%s"' % basegpr) if basegpr else "",
        'srcdirs': srcdirs,
        'exedir': exedir,
        'objdir': objdir or (exedir + "/obj"),
        'compswitches': compswitches,
        'languages': languages,
        'gprmains': gprmains,
        'deps': deps,
        'compiler_extra': compiler_extra,
        'pkg_emulator': gpr_emulator_package(),
        'extra': extra}

    return text_to_file(text=gprtext, filename=prjid + ".gpr")
def check(root_project, recurse, projects=None, units=None, xreports=None):
    """
    Check that running our test with

       -P`root_project`
       [--projects=... for `projects`]
       [--units=... for `units`]
       [--no-subprojects] (if `recurse` is False)

    we obtain reports for the units attached to the projects listed in
    `xreports`.

    If not None, `projects` and `xreports` are expected to be lists of
    shortcut names like 'boolops', 'intops' or 'counters'. This function
    takes care of converting them to relative project file names
    actually expected on the command line or in real project file
    dependencies.

    `root_project` may be either a .gpr filename, in which case it is
    used as-is, or a project short name.

    `recurse` None means "arrange not to pass any option influencing
    recursiveness".
    """
    projects = to_list(projects)
    units = to_list(units)

    # root_project, projects, and units arguments we will provide to the
    # GPRswitches class:
    gprsw_root_project = (root_project if root_project.endswith('.gpr')
                          else _gpr_for(root_project))
    gprsw_projects = [_gpr_for(prj) for prj in projects]
    gprsw_units = units

    # Arrange to execute each check in its own tmp dir and passing a
    # unique --subdirs prevent mixups across test variants within the
    # shared projects.

    # Start with 'wd_foo' from .../.../foo.gpr or a project short name
    # intended for -P.
    tmpdir = 'wd_' + os.path.basename(root_project).split('.')[0]

    # Append the first letter of each project name will pass through
    # --project, if any:
    if projects:
        tmpdir += '-' + ''.join(prj[0] for prj in projects)

    # Append indication on recursion request:
    if recurse:
        tmpdir += '-rt'
    elif recurse is None:
        tmpdir += '-rn'
    else:
        tmpdir += '-rf'

    # For the --subdirs argument, relative to each subproject's object
    # dir, prepend our testcase local directory name:
    gprsw_subdirs = os.path.basename(os.getcwd()) + '_' + tmpdir

    # If a list of expected reports is provided, convert into list of
    # corresponding sources, which the CovControl class expects:
    if xreports is not None:
        ctl_xreports = []
        for xr in xreports:
            ctl_xreports.extend(
                _xreports[xr] if xr in _xreports else [xr])
    else:
        ctl_xreports = None

    # Getting the default behavior wrt recursiveness consists
    # in requesting not to pass --no-subprojects.
    gprsw_no_subprojects = False if recurse is None else not recurse

    wd = Wdir(clean=True)
    wd.to_subdir(tmpdir)

    TestCase(category=None).run(covcontrol=CovControl(

        # The programs we build and exercise alway depend on
        # the three subprojects:
        deps=[_gpr_for('boolops'),
              _gpr_for('intops'),
              _gpr_for('counters')],

        # What we analyse and check depends on our arguments:
        gprsw=GPRswitches(root_project=gprsw_root_project,
                          projects=gprsw_projects,
                          units=gprsw_units,
                          no_subprojects=gprsw_no_subprojects,
                          subdirs=gprsw_subdirs,
                          xvars=[('BOARD', env.target.machine)]),
        xreports=ctl_xreports,

        # The test driver and the likes are never of interest
        units_in=[]))

    wd.to_homedir()
def add_for_lang(lang):
    # Append the testsuite level cargs for LANG, if any, to the
    # enclosing lang_cargs accumulator, prefixed by the corresponding
    # -cargs:<lang> selector.
    suite_cargs = to_list(thistest.suite_cargs_for(lang))
    if suite_cargs:
        lang_cargs.extend(["-cargs:%s" % lang] + suite_cargs)
def __init__(self, testcase, drivers, xfile, xcovlevel, covctl, wdctl):
    # The TESTCASE object that delegates the hard work to us :-)
    self.testcase = testcase

    # The set of DRIVER sources that we are to exercise. We use this
    # as a precise approximation of a set of main subprogram or local
    # executable names so care about basenames only:
    self.drivers = [os.path.basename(d) for d in drivers]

    # The "--level" argument we ought to use on gnatcov command lines:
    self.xcovlevel = xcovlevel

    # The CovControl object that controls aspects of our coverage
    # testing operations (project file dependencies, units for which
    # we expect reports to be produced, ...)
    self.covctl = covctl

    # Internal attributes: Directory where the instantiation takes
    # place, original expectations file, and base prefix of Working
    # Directory names
    self.homedir = os.getcwd() + "/"
    self.xfile = xfile

    # The WdirControl object telling about the Working and Binary
    # subdir prefixes we are to use:
    self.wdctl = wdctl

    # Compute the gnatcov coverage specific extra options that we'll
    # have to pass. We need these early for Xnote expansions.
    self.covoptions = ['--level=' + self.xcovlevel]
    if self.covctl:
        self.covoptions += to_list(self.covctl.covoptions)

    # Compute the list of test launch options strings that we need for
    # expectation CTL lines.
    ctl_opts = ['--trace-mode=%s' % thistest.options.trace_mode]

    self.extracargs = to_list(self.testcase.extracargs)

    # { sourcename -> KnoteDict } dictionaries of emitted/expected
    # line/report notes. We'll extract emitted notes from reports when
    # we know they have been produced. We extract expected notes from
    # the provided expectation file.

    # This needs to be done now, to make sure that we can register
    # this driver object with maximum details for qualification
    # results before run() is called, hence early wrt possible
    # exception occurrences.
    self.elnotes = {}
    self.ernotes = {}

    xnotes = XnotesExpander(
        xfile=xfile, xcov_level=xcovlevel,
        ctl_opts=ctl_opts,
        ctl_cov=self.covoptions,
        ctl_cargs=gprbuild_cargs_with(thiscargs=self.extracargs),
        ctl_tags=thistest.options.tags,
        ctl_cons=[thistest.options.consolidate])
    self.xlnotes = xnotes.xlnotes
    self.xrnotes = xnotes.xrnotes

    # Even though we remember them here, we won't be looking at the
    # xlnotes if we're running for qualification.

    # Empty expectation sets here mean we have not a single source on
    # which anything will be checked. This can only be a mistake and
    # would just pass if we let the processing continue.
    thistest.fail_if(not self.xlnotes, "empty xlnotes from %s !!" % xfile)
    thistest.fail_if(not self.xrnotes, "empty xrnotes from %s !!" % xfile)