def run(self):
    tmp = Wdir('tmp_')

    # Compile all the sources. This method will not work if some sources
    # are not in the "." directory, but since the executables are
    # processed next, there will be an error if not all the sources get
    # compiled.
    project = gprfor(self.sources, srcdirs=[".."],
                     main_cargs=self.options)
    gprbuild(project, gargs=["-bargs", "-z"])

    # If requested, check that there is at least one non-statement SCO in
    # each ALI file:
    if self.ensure_dcscos:
        for ali in self.alis:
            thistest.fail_if(
                not match('^C[^S ]', ali, re.MULTILINE),
                "couldn't find non-statement SCO in %s" % ali)

    # Run "xcov map-routines" and check the absence of errors:
    mapoutput = do(
        maybe_valgrind([
            XCOV, 'map-routines', '-v',
            '--scos=@{}'.format(list_to_file(self.alis)),
        ] + self.execs))

    maperrors = [
        str(m) for m in re.findall(r"(\*\*\*|\!\!\!)(.*)", mapoutput)
    ]

    thistest.log('\n'.join(maperrors))
    thistest.fail_if(
        maperrors,
        "expect no map-routines error for %s" % ", ".join(self.sources))

    tmp.to_homedir()
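# Side note on the error scan above: map-routines diagnostics are detected
# purely by the "***" / "!!!" markers. A minimal standalone illustration,
# using hypothetical output text (real gnatcov messages may be worded
# differently):

import re

sample_output = """\
routine ops__andthen: SCOs mapped
*** no symbol matching routine ops__orelse
!!! inconsistent SCO range for ops__eval
"""

# Same extraction as in run() above: one entry per flagged line.
maperrors = [str(m) for m in re.findall(r"(\*\*\*|\!\!\!)(.*)", sample_output)]

print('\n'.join(maperrors))
# "('***', ' no symbol matching routine ops__orelse')"
# "('!!!', ' inconsistent SCO range for ops__eval')"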
def gen_xcov_reports(self):
    """Generate the reports against which we will check expectation
    specs. Request the report format, saved as test.rep, and the xcov
    format (.ad?.xcov outputs) if we're not in qualification mode."""

    # Determine what options we are going to provide as the assessment's
    # inputs.

    # For a single driver, we always rely on a trace as input and we
    # produce a checkpoint for possible future consolidation if the
    # current execution mode calls for it:

    checkpoints = thistest.options.consolidate == 'checkpoints'
    single_driver = no_ext(self.drivers[0]) if self.singletest() else None

    use_checkpoint_inputs = checkpoints and not single_driver

    # We request traces as inputs with "@inputs.list", where the file
    # contains the list of traces to use, derived from the set of
    # drivers. We request checkpoints as inputs with
    # "[email protected]", where the file contains the list of
    # checkpoints to use, derived from the set of drivers as well:

    (input_opt, input_fn) = \
        ("--checkpoint=", ckptname_for) if use_checkpoint_inputs \
        else ("", tracename_for)

    inputs = "%s@" % input_opt + list_to_file(
        [self.awdir_for(no_ext(main)) + input_fn(no_ext(main))
         for main in self.drivers],
        "inputs.list")

    # We don't need and don't want to pass SCO options when using
    # checkpoints as inputs. Work on a copy in any case, so we don't
    # tamper with self.scoptions when appending report-specific options:

    report_options = (
        list(self.scoptions) if not use_checkpoint_inputs else [])
    report_options.extend(['-o', 'test.rep'])

    if single_driver and checkpoints:
        report_options.append(
            "--save-checkpoint=%s" % ckptname_for(single_driver))

    self.gen_one_xcov_report(
        inputs, format="report", options=report_options)

    # Now produce an alternate .xcov output format, unless we are
    # performing a qualification run, for which that format isn't
    # appropriate.

    if thistest.options.qualif_level:
        return

    xcov_options = self.scoptions if not use_checkpoint_inputs else []
    self.gen_one_xcov_report(inputs, format="xcov", options=xcov_options)
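# The "@inputs.list" argument built above relies on the SUITE.cutils
# list_to_file helper, whose definition is not shown here. Below is a
# minimal sketch of what such a helper presumably does (write one item per
# line, return the file name so callers can prefix it with "@"); the
# default file name and exact formatting are assumptions:

def list_to_file(items, filename="list.tmp"):
    """Dump ITEMS one per line into FILENAME and return FILENAME, so
    callers can build response-file arguments such as
    "--checkpoint=@inputs.list"."""
    with open(filename, "w") as f:
        f.write("\n".join(items) + "\n")
    return filename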
def mode_coverage_sco_options(self):
    # Units of interest are conveyed at instrumentation time and the
    # corresponding SCOs are held by the instrumentation checkpoints.
    instr_checkpoints_opt = "--isi=@%s" % list_to_file(
        [self.abdir_for(pgm) + self.ISI_FILE for pgm in self.programs()],
        "isi-files.list")
    return [instr_checkpoints_opt]
def mode_coverage_sco_options(self):
    # If we have a request for specific options, honor that. Otherwise,
    # if we are requested to convey units of interest through project
    # file attributes, use our build project file, which has been amended
    # for that purpose. Otherwise, fall back to --scos with a list of
    # ALIs that we compute here:
    if self.covctl and self.covctl.gprsw:
        return self.covctl.gprsw.as_strings
    elif self.gprmode:
        return ["-P%s" % self.gpr]
    else:
        return ["--scos=@%s" % list_to_file(self.ali_list(), "alis.list")]
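# Purely illustrative return values for the three branches above, assuming
# a project named "gen"; none of these strings comes from a real run:
#
#   explicit covctl.gprsw switches -> e.g. ["-Pgen", "--projects=ops"]
#   gprmode                        -> ["-Pgen"]
#   default                        -> ["--scos=@alis.list"], where
#                                     alis.list holds one ALI path per line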
from SCOV.tc import TestCase
from SCOV.tctl import CovControl

from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_file

wd = Wdir()

# Check on lone node unit only
wd.to_subdir("wd_1")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_in="../" + list_to_file(["ops"]),
    xreports=["ops.ads", "ops.adb"]))

# Check on lone node + child unit
wd.to_subdir("wd_2")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_in="../" + list_to_file(["ops", "ops.andthen"]),
    xreports=["ops.ads", "ops.adb", "ops-andthen.adb"]))

# Check on lone child unit only
wd.to_subdir("wd_3")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_in="../" + list_to_file(["ops.andthen"]),
    xreports=["ops-andthen.adb"]))

thistest.result()
from SCOV.tc import TestCase
from SCOV.tctl import CovControl

from SUITE.context import thistest
from SUITE.cutils import Wdir, list_to_file

base_out = ["support", "test_or_ft", "test_and_tt", "test_and_tf"]

wd = Wdir()

# Check on lone node unit only
wd.to_subdir("wd_1")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_out="../" + list_to_file(base_out + ["ops"]),
    xreports=["ops-andthen.adb", "ops-orelse.adb"]))

# Check on child units only
wd.to_subdir("wd_2")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_out="../" + list_to_file(base_out + ["ops.orelse", "ops.andthen"]),
    xreports=["ops.ads", "ops.adb"]))

# Check on root + child unit
wd.to_subdir("wd_3")
TestCase(category=None).run(covcontrol=CovControl(
    ulist_out="../" + list_to_file(base_out + ["ops", "ops.andthen"]),
    xreports=["ops-orelse.adb"]))

thistest.result()
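# In the two scripts above, ulist_in and ulist_out name files listing units
# to include in, or exclude from, the set of units of interest. A plausible
# shape for the extra project content a CovControl object would hand to
# gprfor() through covctl.gpr(), assuming it maps onto gnatcov's Coverage
# package attributes (whether it uses exactly these attributes, and the
# list file names, are assumptions):

extra_for_ulist_in = """
package Coverage is
   for Units_List use "../units.list";
end Coverage;
"""

extra_for_ulist_out = """
package Coverage is
   for Excluded_Units_List use "../excluded-units.list";
end Coverage;
"""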
def gen_xcov_reports(self):
    """Generate the reports against which we will check expectation
    specs. Request the report format, saved as test.rep, and the xcov
    format (.ad?.xcov outputs) if we're not in qualification mode."""

    # Determine what options we are going to provide as the assessment's
    # inputs.

    # For a single driver, we always rely on a trace as input and we
    # produce a checkpoint for possible future consolidation if the
    # current execution mode calls for it:

    checkpoints = thistest.options.consolidate == 'checkpoints'
    single_driver = no_ext(self.drivers[0]) if self.singletest() else None

    use_checkpoint_inputs = checkpoints and not single_driver

    # We request traces as inputs with "@inputs.list", where the file
    # contains the list of traces to use, derived from the set of
    # drivers. We request checkpoints as inputs with
    # "[email protected]", where the file contains the list of
    # checkpoints to use, derived from the set of drivers as well:

    (input_opt, input_fn) = \
        ("--checkpoint=", ckptname_for) if use_checkpoint_inputs \
        else ("", self.mode_tracename_for)

    inputs = "%s@%s" % (input_opt, list_to_file(
        [self.awdir_for(pgm) + input_fn(pgm) for pgm in self.programs()],
        "inputs.list"))

    # Determine what command line options we'll pass to designate units
    # of interest and maybe produce a coverage checkpoint. We don't need
    # and don't want to pass SCO options when using checkpoints as
    # inputs.

    sco_options = ([] if use_checkpoint_inputs
                   else self.mode_coverage_sco_options())

    save_checkpoint_options = (
        ["--save-checkpoint=%s" % ckptname_for(single_driver)]
        if single_driver and checkpoints else [])

    # Now produce the --annotate=report format:

    self.gen_one_xcov_report(
        inputs, format="report",
        options=sco_options + save_checkpoint_options + ['-o', 'test.rep'])

    # Then an alternate .xcov output format, unless we are performing a
    # qualification run, for which that format isn't appropriate. There
    # is no need to regenerate a coverage checkpoint here either: if one
    # is needed, it would convey the same information as the one the
    # --annotate=report pass already produced.

    if thistest.options.qualif_level:
        return

    self.gen_one_xcov_report(inputs, format="xcov", options=sco_options)
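# To make the two regimes handled above concrete, here is the shape of the
# arguments that end up being assembled. The trace/checkpoint file names
# come from mode_tracename_for/ckptname_for, so none of the literals below
# is authoritative:
#
# Trace inputs (single driver, or trace-based consolidation):
#   inputs  = "@inputs.list"      # inputs.list lists one trace per program
#   options = mode_coverage_sco_options() + ["-o", "test.rep"]
#             + ["--save-checkpoint=" + ckptname_for(single_driver)]
#               when a lone driver runs with checkpoint consolidation on
#
# Checkpoint inputs (consolidation over several drivers):
#   inputs  = "--checkpoint=@inputs.list"  # one checkpoint per program
#   options = ["-o", "test.rep"]           # SCO options intentionally omitted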
def run(self):
    """Evaluate source coverage as exercised by self.drivers"""

    self.log()

    # Whatever the kind of test, we get to a Working Directory and
    # switch back when done:
    self.to_workdir(self.rwdir())

    # Compute our GPR now, which we will need for the build of single
    # tests and/or for the analysis later on if in gprmode. Turn inlining
    # off for the driver unit, so we exercise the functional code as
    # separately compiled, not as an inlined version of it in a
    # non-representative driver context.

    # Most of the tests with coverage control operate within an extra
    # subdir level.
    this_depth = (thistest.depth + 1 if self.covctl else thistest.depth)

    self.gpr = gprfor(
        mains=self.drivers,
        prjid="gen",
        srcdirs=["../" * n + "src" for n in range(1, this_depth)],
        exedir=self.abdir(),
        main_cargs="-fno-inline",
        langs=["Ada", "C"],
        deps=self.covctl.deps if self.covctl else (),
        extra=self.covctl.gpr() if self.covctl else "")

    # For single tests (no consolidation), we first need to build,
    # producing the binary to execute and the ALI files, then to run
    # "gnatcov run" to get an execution trace. We already have all of
    # these for consolidation tests, and there's actually no need to
    # build if we were provided a bin directory to reuse:
    if self.singletest() and not self.wdctl.reuse_bin:
        gprbuild(self.gpr, extracargs=self.extracargs)

    # Compute the gnatcov command line arguments we'll pass to convey the
    # set of SCOs to operate upon. Note that we need these for both
    # "gnatcov run" and "gnatcov coverage".
    thistest.gprmode = (thistest.options.gprmode
                        or (self.covctl and self.covctl.requires_gpr()))

    self.scoptions = (
        to_list(self.covctl.scoptions)
        if (self.covctl and self.covctl.scoptions)
        else ["-P%s" % self.gpr] if thistest.gprmode
        else ["--scos=@%s" % list_to_file(self.ali_list(), "alis.list")])

    # Remember which of these indicate the use of project files, which
    # might influence default output dirs, for example.
    self.gproptions = [
        opt for opt in self.scoptions if opt.startswith("-P")
    ]

    # Do "gnatcov run" now unless we're consolidating. In the latter case
    # we'll just reuse traces from previous executions.
    if self.singletest():
        self.xcov_run(no_ext(self.drivers[0]))

    # At this point, we have everything we need for the analysis, either
    # from the build+run we just performed in the single test case, or
    # from previous such sequences in the consolidation case. Run
    # "gnatcov coverage" to get the actual coverage reports and check
    # them against our expectation specs.
    self.gen_xcov_reports()
    self.check_expectations()

    self.to_homedir()
    thistest.flush()

    # Let callers retrieve execution data at will
    return self
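# Paraphrase of the single-test path through run() above (no consolidation,
# no reused bin directory); this only restates the calls already made, it
# is not additional API:
#
#   tc = TestCase(category=None)
#   tc.run()
#     gprfor(...)                  # generate the "gen" project file
#     gprbuild(self.gpr, ...)      # build the driver and functional units
#     self.xcov_run(driver)        # "gnatcov run": produce an execution trace
#     self.gen_xcov_reports()      # "gnatcov coverage": report + xcov outputs
#     self.check_expectations()    # compare results with expectation specs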