def write(self, arg):
    """Load the test results named by *arg* and emit them through self.report.

    The whole emission is wrapped in startSuite/stop calls so the report is
    closed properly even if writing an individual test entry raises.
    """
    # Summary expects a list of result sets; we load exactly one here.
    results = [framework.core.loadTestResults(arg)]
    summary = framework.summary.Summary(results)
    self.report.start()
    self.report.startSuite('piglit')
    try:
        for test in summary.allTests():
            self.write_test(summary, test)
    finally:
        # Presumably unwinds any nested suite path opened by write_test
        # before the suite is stopped -- TODO confirm enter_path semantics.
        self.enter_path([])
        self.report.stopSuite()
        self.report.stop()
def main():
    """Generate a static HTML summary from one or more piglit results files.

    Writes one directory per test run (plus shared CSS and per-view summary
    pages) under the directory named on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l", "--list",
                        action="store",
                        help="Use test results from a list file")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="+",
                        help="Results files to include in HTML")
    args = parser.parse_args()

    # Refuse to clobber an existing directory unless -o was given.
    core.checkDir(args.summaryDir, not args.overwrite)

    results = [core.loadTestResults(result_dir)
               for result_dir in args.resultsFiles]
    summary = framework.summary.Summary(results)

    for j, tr in enumerate(summary.testruns):
        # Keep only alphanumerics so the per-run directory name is
        # filesystem-safe.  (A join over a genexp works identically on
        # Python 2 and 3, unlike bare filter() which returns an iterator
        # on Python 3.)
        tr.codename = ''.join(c for c in tr.name if c.isalnum())
        dirname = os.path.join(args.summaryDir, tr.codename)
        core.checkDir(dirname, False)
        writeTestrunHtml(tr, os.path.join(dirname, 'index.html'))
        for test in summary.allTests():
            filename = os.path.join(dirname,
                                    testPathToHtmlFilename(test.path))
            writeResultHtml(test, test.results[j], filename)

    # Shared stylesheets, copied verbatim from the template directory.
    for css in ('result.css', 'index.css'):
        writefile(os.path.join(args.summaryDir, css),
                  readfile(os.path.join(templatedir, css)))

    # One summary page per view.
    for page in ('all', 'problems', 'changes', 'regressions', 'fixes',
                 'skipped'):
        writeSummaryHtml(summary, args.summaryDir, page)
def main():
    """Generate a static HTML summary (getopt-based variant).

    Usage: first positional argument is the output directory; remaining
    arguments and/or -l list-file entries name the result sets to include.
    """
    try:
        options, args = getopt(sys.argv[1:], "hofl:",
                               ["help", "overwrite", "list"])
    except GetoptError:
        usage()

    OptionOverwrite = False
    OptionList = []
    for name, value in options:
        # Membership tests instead of duplicated equality chains.
        if name in ("-h", "--help"):
            usage()
        elif name in ("-o", "--overwrite"):
            OptionOverwrite = True
        elif name in ("-l", "--list"):
            OptionList += parse_listfile(value)
    # Positional args after the summary dir; each entry is wrapped in a
    # list to match the shape parse_listfile produces -- loadresult
    # presumably expects that shape (TODO confirm).
    OptionList += [[name] for name in args[1:]]

    if len(args) < 1 or len(OptionList) == 0:
        usage()

    summaryDir = args[0]
    # Refuse to clobber an existing directory unless -o was given.
    core.checkDir(summaryDir, not OptionOverwrite)

    results = [loadresult(result_dir) for result_dir in OptionList]
    summary = framework.summary.Summary(results)

    for j, tr in enumerate(summary.testruns):
        # Keep only alphanumerics so the per-run directory name is
        # filesystem-safe (portable across Python 2/3, unlike bare filter()).
        tr.codename = ''.join(c for c in tr.name if c.isalnum())
        dirname = os.path.join(summaryDir, tr.codename)
        core.checkDir(dirname, False)
        writeTestrunHtml(tr, os.path.join(dirname, 'index.html'))
        for test in summary.allTests():
            filename = os.path.join(dirname,
                                    testPathToHtmlFilename(test.path))
            writeResultHtml(test, test.results[j], filename)

    # Shared stylesheets, copied verbatim from the template directory.
    for css in ('result.css', 'index.css'):
        writefile(os.path.join(summaryDir, css),
                  readfile(os.path.join(templatedir, css)))

    # One summary page per view (this variant has no 'skipped' page).
    for page in ('all', 'problems', 'changes', 'regressions', 'fixes'):
        writeSummaryHtml(summary, summaryDir, page)
def main(): try: options, args = getopt(sys.argv[1:], "hsdl:", ["help", "summary", "diff", "list"]) except GetoptError: usage() OptionList = [] CountsOnly = False DiffOnly = False for name, value in options: if name == "-h" or name == "--help": usage() elif name == "-s" or name == "--summary": CountsOnly = True elif name == "-d" or name == "--diff": DiffOnly = True elif name == "-l" or name == "--list": OptionList += parse_listfile(value) OptionList += [[name] for name in args[0:]] if len(args) == 0 and len(OptionList) == 0: usage() # make list of results results = [] for result_dir in OptionList: results.append(loadresult(result_dir)) summary = framework.summary.Summary(results) # possible test outcomes possible_results = ["pass", "fail", "crash", "skip", "warn"] if len(OptionList) > 1: possible_results.append("changes") # init the summary counters counts = {} for result in possible_results: counts[result] = 0 # get all results all = summary.allTests() # sort the results list by path all = sorted(all, key=lambda test: test.path) # loop over the tests for test in all: results = [] anyChange = False # loop over the results for multiple runs for j in range(len(summary.testruns)): outcome = test.results[j]['result'] # 'pass', 'fail', etc. # check for different results between multiple runs if len(results) >= 1 and not outcome in results: # something changed counts["changes"] += 1 anyChange = True results.append(outcome) # if all test runs had the same outcome: if not anyChange: counts[outcome] += 1 # print the individual test result line if DiffOnly: if anyChange: print "%s: %s" % (test.path, string.join(results, " ")) elif not CountsOnly: print "%s: %s" % (test.path, string.join(results, " ")) # print the summary info print "summary:" total = 0 for result in possible_results: print " %7s: %5d" % (result, counts[result]) total += counts[result] print " total: %5d" % total
def main(): try: options, args = getopt(sys.argv[1:], "hsdl:", [ "help", "summary", "diff", "list" ]) except GetoptError: usage() OptionList = [] CountsOnly = False DiffOnly = False for name, value in options: if name == "-h" or name == "--help": usage() elif name == "-s" or name == "--summary": CountsOnly = True elif name == "-d" or name == "--diff": DiffOnly = True elif name == "-l" or name == "--list": OptionList += parse_listfile(value) OptionList += [[name] for name in args[0:]] if len(args) == 0 and len(OptionList) == 0: usage() # make list of results results = [] for result_dir in OptionList: results.append(loadresult(result_dir)) summary = framework.summary.Summary(results) # possible test outcomes possible_results = [ "pass", "fail", "crash", "skip", "warn" ] if len(OptionList) > 1: possible_results.append("changes") # init the summary counters counts = {} for result in possible_results: counts[result] = 0 # get all results all = summary.allTests() # sort the results list by path all = sorted(all, key=lambda test: test.path) # loop over the tests for test in all: results = [] anyChange = False # loop over the results for multiple runs for j in range(len(summary.testruns)): outcome = test.results[j]['result'] # 'pass', 'fail', etc. # check for different results between multiple runs if len(results) >= 1 and not outcome in results: # something changed counts["changes"] += 1 anyChange = True results.append(outcome) # if all test runs had the same outcome: if not anyChange: counts[outcome] += 1 # print the individual test result line if DiffOnly: if anyChange: print "%s: %s" % (test.path, string.join(results," ")) elif not CountsOnly: print "%s: %s" % (test.path, string.join(results," ")) # print the summary info print "summary:" total = 0 for result in possible_results: print " %7s: %5d" % (result, counts[result]) total += counts[result] print " total: %5d" % total