import sys

# Helper modules from the surrounding test framework; the package name
# ("fab") is assumed from context.
from fab import ignore
from fab import post
from fab import printer
from fab import publish
from fab import skip


def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.

            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()

            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue

            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash,
                # limit what is printed to just those results where the
                # baseline's result is different.
                #
                # Note that this skips baseline-different, where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                    post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
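# The following is a minimal, self-contained sketch (not part of the
# framework above) of the set-based filtering idiom used when a
# baseline is present: a result is only printed when its issue set is
# NOT disjoint from the baseline markers.  The string values are
# stand-ins; the real code uses the post.Issues constants.
BASELINE_FAILED = "baseline-failed"
BASELINE_PASSED = "baseline-passed"

def differs_from_baseline(issues):
    # True when at least one baseline marker is present in the
    # result's issues, i.e. the two sets are not disjoint.
    return not {BASELINE_FAILED, BASELINE_PASSED}.isdisjoint(issues)

assert differs_from_baseline({"baseline-failed", "output-different"})
assert not differs_from_baseline({"output-different"})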
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.

            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick, update=args.update)
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue

            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            # If there is a baseline, limit what is printed to just
            # those results that differ from it.
            if baseline:
                baseline_issue = False
                for issue in result.issues:
                    if "baseline" in issue:
                        baseline_issue = True
                        break
                if not baseline_issue:
                    continue

            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
def results(logger, tests, baseline, args, result_stats):

    failures = 0
    unresolved = 0
    passed = 0
    nr = 0

    for test in tests:

        nr = nr + 1
        publish.json_status(logger, args, "rebuilding %s (test %d of %d)"
                            % (test.name, nr, len(tests)))

        # If debug logging is enabled this will provide fine grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.

            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()

            if args.skip:
                if printer.Print.RESULT in args.print \
                   and skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue

            result_stats.add_result(result)

            if result.resolution in [post.Resolution.PASSED,
                                     post.Resolution.UNTESTED,
                                     post.Resolution.UNSUPPORTED]:
                passed = passed + 1
            elif result.resolution in [post.Resolution.UNRESOLVED]:
                unresolved = unresolved + 1
            else:
                failures = failures + 1

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and test in baseline \
               and not result.issues.crashed():
                # Since there is a baseline and the test didn't crash,
                # limit what is printed to just those results where the
                # baseline's result is different.
                #
                # Note that this skips baseline-different, where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                    post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_results(logger, args)
    publish.json_summary(logger, args)
    publish.json_status(logger, args, "finished")

    # exit code
    if args.exit_ok:
        return 0
    elif unresolved:
        return 125  # 'git bisect' magic for "don't know"
    elif failures:
        return 1
    else:
        return 0
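# A minimal, self-contained sketch (hypothetical helper, not part of
# the framework above) of the exit-code convention used at the end of
# results(): args.exit_ok forces 0, unresolved results map to 125 (the
# 'git bisect run' value for "this revision cannot be tested"), and
# any remaining failures map to 1.
def exit_code(exit_ok, unresolved, failures):
    if exit_ok:
        return 0
    elif unresolved:
        return 125  # 'git bisect' magic for "don't know"
    elif failures:
        return 1
    else:
        return 0

assert exit_code(False, unresolved=0, failures=3) == 1
assert exit_code(False, unresolved=2, failures=3) == 125  # unresolved wins
assert exit_code(True, unresolved=2, failures=3) == 0     # exit_ok forces success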