def __process_one_test(self, qde):
    """
    Check the "report" output for one test.

    "qde" is presumably a qualification-test data entry — it exposes the
    test's main source (xfile), driver list (drivers), working directory
    (wdir) and expected-note tables (xrnotes); confirm against callers.
    """
    frame(text=("report check for xfile = %s\n" % qde.xfile
                + "drivers = %s" % str(qde.drivers)),
          char='~').display()

    # Count the number of expected exemption regions
    xregions = 0
    for source_xrn in qde.xrnotes.values():
        for kind in xNoteKinds:
            xregions += len(source_xrn[kind])

    # We're looking at the last report produced, with the last
    # applicable xcov-level
    self.__setup_expectations(
        ntraces=len(qde.drivers),
        # The first three characters of the working dir name encode the
        # applicable xcov level (via the xcovlevel_from map).
        xcovlevel=xcovlevel_from[os.path.basename(qde.wdir)[0:3]],
        xregions=xregions)

    reports = ls(os.path.join(qde.wdir, "test.rep"))
    thistest.fail_if(
        len(reports) != 1,
        "expected 1 report, found %d" % len(reports))

    # Parse the report, dispatching each line to __process_line, then
    # run every report-piece checker. Use a plain loop for the checks:
    # a list comprehension used only for side effects discards its
    # result and obscures intent.
    self.report = Tfile(reports[0], self.__process_line)
    for rpe in self.rpElements:
        rpe.check()
def run(self):
    """
    Run the underlying TestCase, then verify that the diagnostics the
    harness reported match the expected ones exactly (each expected
    diagnostic reported once, each reported diagnostic expected once).
    """
    TestCase.run(self)
    thistest.flush()

    # Collect the diagnostics actually reported by the harness. Raw
    # string for the regex so the backslash escapes are explicit.
    self.reported_diags = [
        HarnessDiagnostic(text=errtext)
        for errtext in re.findall(
            pattern=r"^ \* (?:exception|\(.*\)) .*",
            string=contents_of(thistest.report.report_file),
            flags=re.M)
    ]

    # Cross-match reported against expected diagnostics. Plain loops
    # instead of side-effect list comprehensions: the comprehensions
    # built throwaway lists and hid the intent.
    for reported in self.reported_diags:
        for expected in self.expected_diags:
            if expected.match(reported):
                self.__count_match_on(reported, expected)

    # Every expected diagnostic must have matched exactly once
    for expected in self.expected_diags:
        thistest.fail_if(
            expected.nmatches != 1,
            "expectation check failed - %s" % expected.text)

    # Debug trace of the reported diagnostics. print() call syntax:
    # the original "print i.__dict__" statement is a Python 3 syntax
    # error.
    for reported in self.reported_diags:
        print(reported.__dict__)

    # Every reported diagnostic must have been expected exactly once
    for reported in self.reported_diags:
        thistest.fail_if(
            reported.nmatches != 1,
            "reporting check failed - %s" % reported.text)
def check(self):
    """
    Verify this report-pattern expectation: the number of matches seen
    equals the expected count and, when an ordering predecessor ("pre")
    is set, all of its matches appear before our first one.

    NOTE(review): the original body started with a disabled
    "0 and thistest.log(...)" debug expression; it could never run and
    has been removed.
    """
    nmatches = len(self.matches)

    # Check that we have the number of expected matches
    thistest.fail_if(
        nmatches != self.nexpected,
        '%d matches of pattern "%s", != expected %d' % (
            nmatches, self.pattern, self.nexpected))

    # If we expected matches, have some, and have an ordering
    # constraint specified, check it
    if self.nexpected > 0 and nmatches != 0 and self.pre:
        last_pre = self.pre.__last_match().lno
        first_self = self.__first_match().lno
        thistest.fail_if(
            last_pre > first_self,
            'first match for "%s" (%d) too early wrt predecessor "%s" (%d)'
            % (self.pattern, first_self, self.pre.pattern, last_pre))
def check_xcov_content(filename, expected_cov):
    """
    Dumbed-down version of coverage matching.

    Check that the XCOV file "filename" matches some expected coverage data.
    "expected_cov" is a dict like:

    >>> {'+': {5, 7}, '!': {6}}

    This is interpreted as: lines 5 and 7 must be fully covered (+), line 6
    must be partially covered (!) and all other lines must be no-code (.).
    """
    got_cov = collections.defaultdict(set)
    with open(filename) as report:
        for report_line in report:
            # Guard clauses: skip lines that are not coverage lines, and
            # ignore no-code ('.') annotations.
            match = COV_RE.match(report_line)
            if match is None:
                continue
            lineno, cov_char = match.groups()
            if cov_char == '.':
                continue
            got_cov[cov_char].add(int(lineno))

    thistest.fail_if(
        got_cov != expected_cov,
        '{}: unexpected coverage report content:\n'
        'Expected: {}\n'
        'But got: {}\n'.format(filename, fmt_cov(expected_cov),
                               fmt_cov(got_cov)))
def check_xcov_reports(xcov_filename_pattern, expected_cov):
    """
    Check the set of XCOV report files and their content.

    Collect files that match "xcov_filename_pattern" (a glob pattern) and
    check the set of files matches "expected_cov". Then, check that each
    report matches the expected coverage results.

    "expected_cov" is a mapping: filename -> coverage data. See
    "check_xcov_content" for the coverage data format.
    """
    def fmt_sorted_indented_list(items):
        # One item per line, indented, in sorted order, for readable
        # failure messages.
        return '\n'.join(' {}'.format(s) for s in sorted(items))

    # set(...) directly: the original identity comprehension
    # {f for f in glob.glob(...)} added nothing.
    xcov_files = set(glob.glob(xcov_filename_pattern))

    thistest.fail_if(
        xcov_files != set(expected_cov),
        'Unexpected XCOV files. Expected:\n'
        '{}\n'
        'But got instead:\n'
        '{}\n'.format(fmt_sorted_indented_list(expected_cov),
                      fmt_sorted_indented_list(xcov_files)))

    # Only check the content of reports that actually exist; missing
    # ones were already diagnosed above.
    for filename, cov_data in expected_cov.items():
        if filename in xcov_files:
            check_xcov_content(filename, cov_data)
def run(self): tmp = Wdir('tmp_') # Compile all the sources. This method will not work if there are # sources that are not in the "." directory, but since executabes are # processed next, there will be an error if not all sources are # compiled. project = gprfor(self.sources, srcdirs=[".."], main_cargs=self.options) gprbuild(project, gargs=["-bargs", "-z"]) # If requested, check at least one non statement SCO in alis if self.ensure_dcscos: for ali in self.alis: thistest.fail_if(not match('^C[^S ]', ali, re.MULTILINE), "couldn't find non-statement SCO in %s" % ali) # Run xcov map-routines and check absence of errors mapoutput = do( maybe_valgrind([ XCOV, 'map-routines', '-v', '--scos=@{}'.format(list_to_file(self.alis)), ] + self.execs)) maperrors = [ str(m) for m in re.findall(r"(\*\*\*|\!\!\!)(.*)", mapoutput) ] thistest.log('\n'.join(maperrors)) thistest.fail_if( maperrors, "expect no map-routines error for %s" % ", ".join(self.sources)) tmp.to_homedir()
def checked_xcov(args, out_file):
    """
    Run "xcov" and make the testcase fail if the output file is not empty.
    """
    xcov(args, out_file)
    output = contents_of(out_file)
    thistest.fail_if(
        output,
        'gnatcov output not empty ({}):\n'
        ' {}\n'
        '{}'.format(out_file, ' '.join(args), output))
def check_xcov_reports(xcov_filename_pattern, expected_cov, cwd=None):
    """
    Check the set of XCOV report files and their content.

    Collect files that match "xcov_filename_pattern" (a glob pattern) and
    check the set of files matches "expected_cov". Then, check that each
    report matches the expected coverage results.

    "expected_cov" is a mapping: filename -> coverage data. See
    "check_xcov_content" for the coverage data format.

    If "cwd" is not None, it must be a valid directory name, and both the
    filename patterns and the file names in expected_cov must be relative
    to it.
    """
    def fmt_sorted_indented_list(items):
        return '\n'.join(' {}'.format(s) for s in sorted(items))

    # Avoid discrepancies between filenames on Windows and Unix. Although it
    # is not the canonical representation, Windows supports using slash as
    # separators, so use it.
    def canonicalize_file(filename):
        return filename.replace('\\', '/')

    home_dir = None
    try:
        # Move to "cwd" if requested, remembering where to come back to.
        if cwd is not None:
            home_dir = os.getcwd()
            os.chdir(cwd)

        xcov_files = {canonicalize_file(f)
                      for f in glob.glob(xcov_filename_pattern)}
        expected_cov = {canonicalize_file(f): data
                        for f, data in expected_cov.items()}

        thistest.fail_if(
            xcov_files != set(expected_cov),
            'Unexpected XCOV files. Expected:\n'
            '{}\n'
            'But got instead:\n'
            '{}\n'.format(fmt_sorted_indented_list(expected_cov),
                          fmt_sorted_indented_list(xcov_files)))

        for filename, cov_data in expected_cov.items():
            if filename in xcov_files:
                check_xcov_content(filename, cov_data)
    finally:
        # Restore the original working directory even if a check failed.
        if home_dir is not None:
            os.chdir(home_dir)
def gprbuild_wrapper(root_project, gargs):
    """
    Build "root_project" with gprbuild, honoring the switches relevant to
    this run, and optionally verify that gprbuild stayed silent.

    NOTE(review): gprsw, extra_gprbuild_args, extra_gprbuild_cargs,
    trace_mode and check_gprbuild_output are free names — presumably
    closed over from an enclosing function or module scope not visible
    here; confirm against the surrounding definition.
    """
    # Honor build relevant switches from gprsw here
    gprbuild(root_project,
             gargs=gprsw.build_switches + gargs + extra_gprbuild_args,
             extracargs=extra_gprbuild_cargs,
             trace_mode=trace_mode)

    # When requested, a non-empty gprbuild.out means unexpected build
    # noise (warnings/errors): fail the test and show the output.
    if check_gprbuild_output:
        gprbuild_out = contents_of('gprbuild.out')
        thistest.fail_if(
            gprbuild_out,
            "gprbuild's output (gprbuild.out) is not empty:\n{}"
            .format(indent(gprbuild_out)))
def check_xcov_content(filename, expected_cov):
    """
    Dumbed-down version of coverage matching.

    Check that the XCOV file "filename" matches some expected coverage data.
    "expected_cov" is a dict like:

    >>> {'+': {5, 7}, '!': {6}}

    This is interpreted as: lines 5 and 7 must be fully covered (+), line 6
    must be partially covered (!) and all other lines must be no-code (.)
    or fully covered (+).
    """
    def remove_empty_sets(data):
        """
        Remove entries in "data" that contain empty sets of lines.
        """
        return {
            annotation: lines
            for annotation, lines in data.items()
            if lines
        }

    # Check that expected coverage data contain only supported line
    # annotations
    invalid_line_annotations = set(expected_cov) - {'+', '!', '-'}
    assert not invalid_line_annotations, (
        'Invalid line annotations: {}'.format(' '.join(
            sorted(invalid_line_annotations))))

    # Collect the coverage annotations found in the report, per
    # annotation character, skipping no-code ('.') lines.
    got_cov = collections.defaultdict(set)
    with open(filename) as f:
        for line in f:
            m = COV_RE.match(line)
            if m:
                lineno, cov_char = m.groups()
                if cov_char != '.':
                    got_cov[cov_char].add(int(lineno))
    got_cov = dict(got_cov)

    # Compute the set of lines that are expected not to be tagged as no-code
    # and refine expectations to expect "+" when we got "+" while we expected
    # nothing specific.
    expected_non_dot_lines = set()
    for lines in expected_cov.values():
        expected_non_dot_lines.update(lines)

    # Copy the line sets instead of calling .update(expected_cov): the
    # original aliased the caller's set objects, so adding refined "+"
    # lines below mutated expected_cov itself and corrupted both the
    # caller's data and the "Expected:" diagnostic.
    refined_expectations = collections.defaultdict(set)
    for annotation, lines in expected_cov.items():
        refined_expectations[annotation] = set(lines)
    for line in got_cov.get('+', set()):
        if line not in expected_non_dot_lines:
            refined_expectations['+'].add(line)

    got_cov = remove_empty_sets(got_cov)
    refined_expectations = remove_empty_sets(refined_expectations)

    thistest.fail_if(
        got_cov != refined_expectations,
        '{}: unexpected coverage report content:\n'
        'Expected: {}\n'
        'Refined to: {}\n'
        'But got: {}\n'.format(filename, fmt_cov(expected_cov),
                               fmt_cov(refined_expectations),
                               fmt_cov(got_cov)))