def results(logger, tests, baseline, args, result_stats):
    """Post-mortem each test in TESTS and print/publish its result.

    Tests that are ignored or skipped (per ARGS) are filtered out and
    tallied in RESULT_STATS.  When BASELINE is set, printing is limited
    to results that differ from the baseline.
    """
    for test in tests:
        publish.json_status(logger, args, "rebuilding %s" % test.name)
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            # Use a real conditional expression; the old
            # "cond and a or b" idiom silently selects b whenever a is
            # falsy, regardless of cond.
            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
def results(logger, tests, baseline, args, result_stats):
    """Post-mortem each test in TESTS and print/publish its result.

    Tests that are ignored or skipped (per ARGS) are filtered out and
    tallied in RESULT_STATS.  When BASELINE is set, printing is limited
    to results that raised a baseline issue.
    """
    for test in tests:
        publish.json_status(logger, args, "rebuilding %s" % test.name)
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick, update=args.update)
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            # If there is a baseline; limit what is printed to just
            # those that differ.
            if baseline:
                # any() replaces the manual flag-and-break loop.
                if not any("baseline" in issue for issue in result.issues):
                    continue

            # Use a real conditional expression; the old
            # "cond and a or b" idiom silently selects b whenever a is
            # falsy, regardless of cond.
            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
def results(logger, tests, baseline, args, result_stats):
    """Post-mortem each test in TESTS and print/publish its result.

    Tests that are ignored or skipped (per ARGS) are filtered out and
    tallied in RESULT_STATS.  When BASELINE is set, printing is limited
    to results that differ from the baseline.
    """
    for test in tests:
        publish.json_status(logger, args, "rebuilding %s" % test.name)
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            # Use a real conditional expression; the old
            # "cond and a or b" idiom silently selects b whenever a is
            # falsy, regardless of cond.
            b = printer.JsonBuilder(sys.stdout) if args.json \
                else printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    """Run one test end-to-end: filter, boot domains, run scripts, post-mortem.

    TEST_COUNT/TESTS_COUNT are only used to build progress messages.
    Stats are accumulated into TEST_STATS and RESULT_STATS; results are
    published via the publish module.  Returns nothing; a failed boot
    bails early (leaving the test unresolved) while a crashed script
    still reaches the post-mortem in the finally block.
    """

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    # Timer results filled in as the test progresses; None means the
    # corresponding phase never ran.
    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run; remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run so need to perform post
        # mortem.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....",
                            prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, copy the contents
            # BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty like deleted the parent
            # directory.
            #
            # By backing up each test just before it is started,
            # leaves a trail of what tests were attempted during a
            # test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(test.output_directory,
                                                        "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(logger, test,
                                                          domain_prefix,
                                                          boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("TIMEOUT while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("EOF while booting domains")
                        return
                    except:
                        # Deliberately bare: log whatever went wrong,
                        # then re-raise unchanged.
                        logger.exception("EXCEPTION while booting domains")
                        raise

                # Run the scripts directly
                with logger.time("running scripts %s", test.host_scripts) as test_script_time:
                    with tcpdump.Dump(logger, domain_prefix,
                                      test.output_directory,
                                      [test_domain.domain
                                       for test_domain in test_domains.values()],
                                      enable=args.tcpdump):
                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.redirect_output(open(output, "w"))

                            # If a script times out, don't try to run
                            # post-mortem.sh.
                            host_timed_out = None

                            for script in test.host_scripts:
                                test_domain = test_domains[script.host_name]
                                try:
                                    test_domain.read_file_run(script.path)
                                except pexpect.TIMEOUT as e:
                                    # A timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    message = "%s while running script %s" % (
                                        post.Issues.TIMEOUT, script)
                                    logger.warning("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message, post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except pexpect.EOF as e:
                                    # An EOF while a script is running
                                    # is a sign that libvirt crashed.
                                    message = "%s while running script %s" % (
                                        post.Issues.EOF, script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message, post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    message = "%s %s while running script %s" % (
                                        post.Issues.EXCEPTION, str(e), script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "\n%s %s %s\n", post.LHS, message, post.RHS)
                                    raise

                            if args.run_post_mortem is False:
                                logger.warning("+++ skipping script post-mortem.sh -- disabled +++")
                            elif host_timed_out:
                                logger.warning("+++ skipping script post-mortem.sh -- %s timed out +++" % (host_timed_out))
                            else: # None or True
                                script = "../bin/post-mortem.sh"
                                for host_name in test.host_names:
                                    test_domain = test_domains[host_name]
                                    test_domain.console.append_output("%s post-mortem %s", post.LHS, post.LHS)
                                    logger.info("running %s on %s", script, host_name)
                                    try:
                                        status = test_domain.console.run(script)
                                        if status:
                                            logger.error("%s failed on %s with status %s", script, host_name, status)
                                        else:
                                            test_domain.console.append_output("%s post-mortem %s", post.RHS, post.RHS)
                                    except pexpect.TIMEOUT as e:
                                        # A post-mortem ending with a
                                        # TIMEOUT gets treated as a
                                        # FAIL.
                                        message = "%s while running script %s" % (
                                            post.Issues.TIMEOUT, script)
                                        logger.warning("*** %s ***" % message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message, post.RHS)
                                        continue # always teardown
                                    except pexpect.EOF as e:
                                        # A post-mortem ending with an
                                        # EOF gets treated as
                                        # unresolved.
                                        message = "%s while running script %s" % (
                                            post.Issues.EOF, script)
                                        logger.exception("*** %s ***" % message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message, post.RHS)
                                        continue # always teardown
                                    except BaseException as e:
                                        # if there is an exception, write
                                        # it to the console
                                        message = "%s %s while running script %s" % (
                                            post.Issues.EXCEPTION, str(e), script)
                                        logger.exception(message)
                                        test_domain.console.append_output(
                                            "\n%s %s %s\n", post.LHS, message, post.RHS)
                                        raise

                            for test_domain in test_domains.values():
                                test_domain.console.append_output(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info("closing all the test domain log files")
                            for test_domain in test_domains.values():
                                test_domain.console.close_output()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:",
                                     prefix="  ")
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) suffix = "******" test_stats.add(test, "total") # Would the number of tests to be [re]run be better? test_lapsed_time = timing.Lapsed() test_prefix = "%s %s (test %d of %d)" % (suffix, test.name, test_count, tests_count) ignored, include_ignored, details = ignore.test(logger, args, test) if ignored and not include_ignored: result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") # No need to log all the ignored tests when an explicit # sub-set of tests is being run. For instance, when running # just one test. if not args.test_name: logger.info("%s ignored (%s)", test_prefix, details) return # Be lazy with gathering the results, don't run the sanitizer or # diff. # # XXX: There is a bug here where the only difference is white # space. The test will show up as failed when it previousl showed # up as a whitespace pass. # # The presence of the RESULT file is a proxy for detecting that # the test was incomplete. old_result = post.mortem(test, args, test_finished=None, skip_diff=True, skip_sanitize=True) if skip.result(logger, args, old_result): logger.info("%s skipped (previously %s)", test_prefix, old_result) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) return if old_result: test_stats.add(test, "tests", "retry") logger.info("%s started (previously %s) ....", test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s started ....", test_prefix) test_stats.add(test, "tests") # Move the contents of the existing OUTPUT directory to # BACKUP_DIRECTORY. Do it file-by-file so that, at no point, the # directory is empty. # # By moving each test just before it is started a trail of what # tests were attempted at each run is left. # # XXX: During boot, swan-transmogrify runs "chcon -R # testing/pluto". 
Of course this means that each time a test is # added and/or a test is run (adding files under <test>/OUTPUT), # the boot process (and consequently the time taken to run a test) # keeps increasing. # # Always moving the directory contents to the BACKUP_DIRECTORY # mitigates this some. backup_directory = None if os.path.exists(test.output_directory): backup_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Copy "empty" OUTPUT directories too. args.dry_run or os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) debugfile = None result = None # At least one iteration; above will have filtered out skips and # ignored for attempt in range(args.attempts): test_stats.add(test, "attempts") # Create the OUTPUT directory. try: if not args.dry_run: os.mkdir(test.output_directory) elif os.exists(test.output_directory): raise FileExistsError() except FileExistsError: # On first attempt, the OUTPUT directory will be empty # (see above) so no need to save. if attempt > 0: backup_directory = os.path.join(test.output_directory, str(attempt)) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) args.dry_run or os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): if os.path.isfile(src): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) # Start a debug log in the OUTPUT directory; include timing # for this specific test attempt. 
with logger.timer_stack(), logger.debug_stack(test.output_directory, "debug.log"): attempt_lapsed_time = timing.Lapsed() attempt_prefix = "%s (attempt %d of %d)" % (test_prefix, attempt+1, args.attempts) logger.info("%s started ....", attempt_prefix) if backup_directory: logger.info("contents of '%s' moved to '%s'", test.output_directory, backup_directory) backup_directory = None ending = "undefined" try: if not args.dry_run: _run_test(domain_prefix, test, args, boot_executor) ending = "finished" result = post.mortem(test, args, test_finished=True, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh and leave a marker to # indicate that the test finished. logger.info("storing result in '%s'", test.result_file()) with open(test.result_file(), "w") as f: f.write('"result": "%s"\n' % result) except pexpect.TIMEOUT as e: logger.exception("**** test %s timed out ****", test.name) ending = "timed-out" # Still peform post-mortem so that errors are # captured, but force the result to incomplete. result = post.mortem(test, args, test_finished=False, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to here # should have a non-null RESULT. test_stats.add(test, "attempts", ending, str(result)) logger.info("%s %s%s%s after %s %s", attempt_prefix, result, result.errors and " ", result.errors, attempt_lapsed_time, suffix) if result.passed: break # Above will have set RESULT. During a control-c or crash the # below will not be executed. test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) logger.info("%s %s%s%s after %s %s", test_prefix, result, result.errors and " ", result.errors, test_lapsed_time, suffix) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) with logger.time("processing test %s", test_prefix) as test_total_time: ignored, include_ignored, details = ignore.test(logger, args, test) if ignored and not include_ignored: result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") # No need to log all the ignored tests when an explicit # sub-set of tests is being run. For instance, when running # just one test. if not args.test_name: logger.info("$s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Be lazy with gathering the results, don't run the sanitizer or # diff. # # XXX: There is a bug here where the only difference is white # space. The test will show up as failed when it previously # showed up as a white-space pass. # # The presence of the RESULT file is a proxy for detecting that # the test was incomplete. old_result = post.mortem(test, args, test_finished=None, skip_diff=True, skip_sanitize=True) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) return if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create the OUTPUT/ directory; if it already exists, move any # contents to BACKUP/. 
Do it file-by-file so that, at no # point, the OUTPUT/ directory missing (presence of OUTPUT/ # implies the test was started). # # By backing up each test just before it is started, a trail # of what tests were attempted during each run is created. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a test # is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the time # taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is not # under testing/pluto/ this problem is avoided. try: os.mkdir(test.output_directory) except FileExistsError: backup_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, copy it. os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug log # writing to that directory; include timing for this test run. with logger.debug_time("testing %s", test_prefix, logfile=os.path.join(test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: logger.exception("timeout while booting domains", test.name) # Bail before RESULT is written - being unable to # boot the domains is a disaster. 
return # Run the scripts directly with logger.time( "running scripts %s", " ".join(("%s:%s" % (host, script)) for host, script in test.host_script_tuples)) as test_script_time: try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join( test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.output(open(output, "w")) for host, script in test.host_script_tuples: if args.stop_at == script: logger.error( "stopping test run at (before executing) script %s", script) break test_domain = test_domains[host] test_domain.read_file_run(script) result = post.mortem(test, args, test_finished=True, update=True) except pexpect.TIMEOUT as e: logger.exception( "**** timeout out while running script %s ****", script) # Still peform post-mortem so that errors are # captured, but force the result to # incomplete. result = post.mortem(test, args, test_finished=False, update=True) finally: # Close the redirected test-result log files for test_domain in test_domains.values(): logfile = test_domain.console.output() logfile.close() # Always disconnect from the test domains. logger.info("closing all test domains") for test_domain in test_domains.values(): logfile = test_domain.console.output() if logfile: logfile.close() test_domain.close() # Above will have set RESULT. Exceptions such as control-c or # a crash bypass this code. logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.errors and " ", result.errors, suffix) # Since the test finished, emit enough JSON to fool scripts like # pluto-testlist-scan.sh. # # This also leaves a simple marker to indicate that the test # finished. # # A more robust way of doing this would be to mark each of the # console logs as complete as it is closed. # # More detailed information can be extracted from the debug.log. 
hosts = {} for host in sorted(test.host_names): if host in result.errors: hosts[host] = [error for error in result.errors[host]] else: hosts[host] = ["passed"] RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.expected_result, jsonutil.result.result: str(result), jsonutil.result.time: jsonutil.ftime(datetime.now()), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), jsonutil.result.total_time: round(test_total_time.seconds(), 1), jsonutil.result.hosts: hosts, } j = jsonutil.dumps(RESULT) logger.info("filling '%s' with json: %s", test.result_file(), j) with open(test.result_file(), "w") as f: f.write(j) f.write("\n") test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) with logger.time("processing test %s", test_prefix) as test_total_time: ignored, include_ignored, details = ignore.test(logger, args, test) if ignored and not include_ignored: result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") # No need to log all the ignored tests when an explicit # sub-set of tests is being run. For instance, when running # just one test. if not args.test_name: logger.info("$s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Be lazy with gathering the results, don't run the sanitizer or # diff. # # XXX: There is a bug here where the only difference is white # space. The test will show up as failed when it previously # showed up as a white-space pass. # # The presence of the RESULT file is a proxy for detecting that # the test was incomplete. old_result = post.mortem(test, args, test_finished=None, skip_diff=True, skip_sanitize=True) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) return if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create the OUTPUT/ directory; if it already exists, move any # contents to BACKUP/. 
Do it file-by-file so that, at no # point, the OUTPUT/ directory missing (presence of OUTPUT/ # implies the test was started). # # By backing up each test just before it is started, a trail # of what tests were attempted during each run is created. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a test # is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the time # taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is not # under testing/pluto/ this problem is avoided. try: os.mkdir(test.output_directory) except FileExistsError: backup_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, copy it. os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug log # writing to that directory; include timing for this test run. with logger.debug_time("testing %s", test_prefix, logfile=os.path.join(test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: logger.exception("timeout while booting domains", test.name) # Bail before RESULT is written - being unable to # boot the domains is a disaster. 
return # Run the scripts directly with logger.time("running scripts %s", " ".join(("%s:%s" % (host, script)) for host, script in test.host_script_tuples)) as test_script_time: try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join(test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.output(open(output, "w")) for host, script in test.host_script_tuples: if args.stop_at == script: logger.error("stopping test run at (before executing) script %s", script) break test_domain = test_domains[host] test_domain.read_file_run(script) result = post.mortem(test, args, test_finished=True, update=True) except pexpect.TIMEOUT as e: logger.exception("**** timeout out while running script %s ****", script) # Still peform post-mortem so that errors are # captured, but force the result to # incomplete. result = post.mortem(test, args, test_finished=False, update=True) finally: # Close the redirected test-result log files for test_domain in test_domains.values(): logfile = test_domain.console.output() logfile.close() # Always disconnect from the test domains. logger.info("closing all test domains") for test_domain in test_domains.values(): logfile = test_domain.console.output() if logfile: logfile.close() test_domain.close() # Above will have set RESULT. Exceptions such as control-c or # a crash bypass this code. logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.errors and " ", result.errors, suffix) # Since the test finished, emit enough JSON to fool scripts like # pluto-testlist-scan.sh. # # This also leaves a simple marker to indicate that the test # finished. # # A more robust way of doing this would be to mark each of the # console logs as complete as it is closed. # # More detailed information can be extracted from the debug.log. 
hosts = {} for host in sorted(test.host_names): if host in result.errors: hosts[host] = [error for error in result.errors[host]] else: hosts[host] = ["passed"] RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.expected_result, jsonutil.result.result: str(result), jsonutil.result.time: jsonutil.ftime(datetime.now()), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), jsonutil.result.total_time: round(test_total_time.seconds(), 1), jsonutil.result.hosts: hosts, } j = jsonutil.dumps(RESULT) logger.info("filling '%s' with json: %s", test.result_file(), j) with open(test.result_file(), "w") as f: f.write(j) f.write("\n") test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def results(logger, tests, baseline, args, result_stats):
    """Print, for each test in TESTS, the fields selected by ARGS.PRINT.

    Ignored tests and skipped results are filtered out; everything
    else is accumulated into RESULT_STATS.  Each surviving test is
    written to stdout as one space-separated line (diffs, when
    requested, follow on their own lines).
    """
    for test in tests:
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, include_ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                if not include_ignored:
                    continue

            # Filter out tests that have not been run.  The
            # post-mortem is only performed when the result column was
            # actually requested.
            result = None
            if not ignored and Print.result in args.print:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     test_finished=None,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if skip.result(logger, args, result):
                    continue

            if not result:
                result_stats.add_ignored(test, str(result))
            else:
                result_stats.add_result(result)

            # SEP becomes " " once the first field is printed so later
            # fields are space separated on the same line.
            sep = ""

            # Print the test's name/path
            if not args.prefix:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix is Prefix.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif args.prefix is Prefix.test_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif args.prefix is Prefix.output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignored:
                print(sep, end="")
                print("ignored", ignored, end="")
                sep = " "

            # Print each requested field; diffs are multi-line so they
            # are deferred until after the one-line summary.
            for p in args.print:
                if p in [Print.diffs]:
                    continue
                elif p is Print.directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif p is Print.expected_result:
                    print(sep, end="")
                    print(test.expected_result, end="")
                    sep = " "
                elif p is Print.full_name:
                    print(sep, end="")
                    print(test.full_name, end="")
                    sep = " "
                elif p is Print.host_names:
                    # comma-separated within the field
                    for name in test.host_names:
                        print(sep, end="")
                        print(name, end="")
                        sep = ","
                    sep = " "
                elif p is Print.kind:
                    print(sep, end="")
                    print(test.kind, end="")
                    sep = " "
                elif p is Print.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif p is Print.output_directory:
                    print(sep, end="")
                    print(test.output_directory, end="")
                    sep = " "
                elif p is Print.result:
                    if result:
                        print(sep, end="")
                        if result.errors:
                            print(result, result.errors, end="")
                        else:
                            print(result, end="")
                        sep = " "
                elif p is Print.sanitize_directory:
                    print(sep, end="")
                    print(test.sanitize_directory, end="")
                    sep = " "
                elif p is Print.saved_output_directory:
                    print(sep, end="")
                    print(test.saved_output_directory, end="")
                    sep = " "
                elif p is Print.scripts:
                    # comma-separated host:script pairs
                    for host, script in test.host_script_tuples:
                        print(sep, end="")
                        print("%s:%s" % (host, script), end="")
                        sep = ","
                    sep = " "
                else:
                    print()
                    print("unknown print option", p)

            # terminate the per-test summary line
            print()

            if Print.diffs in args.print and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            # Flush so output interleaves sanely when stdout is piped.
            sys.stdout.flush()
def main():
    """Entry point: scan test directories and write 'table.json' to stdout.

    Returns a process exit code: 1 when no valid tests were found,
    otherwise 0.  Note the emitted "summary" block contains only
    zeroed placeholder counters; only "rows" carries real results.
    """

    parser = argparse.ArgumentParser(description="write 'table.json' to standard output")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output")

    # Let the helper modules register their own command-line options.
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmresults")

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    columns = [ "Test", "Expected", "Result", "Run time", "Responder", "..." ]
    rows = []

    for test in tests:
        # progress goes to stderr so stdout stays pure JSON
        sys.stderr.write("%s\n" % test.name)

        # Filter out tests that are being ignored?
        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored:
            if not include_ignored:
                continue

        # Filter out tests that have not been run
        result = None
        if not ignored:
            result = post.mortem(test, args, baseline=None,
                                 output_directory=test.saved_output_directory,
                                 test_finished=None,
                                 skip_sanitize=args.quick or args.quick_sanitize,
                                 skip_diff=args.quick or args.quick_diff,
                                 update=args.update,
                                 update_sanitize=args.update_sanitize,
                                 update_diff=args.update_diff)
            if skip.result(logger, args, result):
                continue

        # One row per test: fixed columns followed by one per-host
        # status column.  NOTE(review): when a test is both ignored
        # and include_ignored, RESULT stays None and the
        # result.errors access below would fail — presumably that
        # combination doesn't reach here; verify against ignore.test().
        row = [ test.name, test.expected_result, str(result), "run-time" ]
        for host in sorted(test.host_names):
            errors = result.errors.errors
            if host in errors:
                row.append("%s %s" % (host, " ".join(sorted(errors[host]))))
            else:
                row.append("%s passed" % (host))
        rows.append(row)

    # Placeholder totals; consumers of table.json expect these keys to
    # exist even when not computed here.
    summary = {
        "Total": 0,
        "passed": 0,
        "failed": 0,
        "abort": 0,
        "missing baseline": 0,
        "missing console output": 0,
        "missing OUTPUT": 0,
        "missing RESULT": 0,
        "ASSERT": 0,
        "CORE": 0,
        "EXPECT": 0,
        "GPFAULT": 0,
        "SEGFAULT": 0,
        "date": "0000-00-00",
        "dir": "testing/pluto",
        "runtime": 0,
        "runtime_str": "00:00:00",
    }

    table = {
        "suffix": "/OUTPUT",
        "summary": summary,
        "columns": columns,
        "rows": rows,
        "runDir": "???",
    }
    print(json.dumps(table, indent=2))

    return 0
def results(logger, tests, baseline, args, result_stats):
    """Emit, per test, the fields selected by ARGS.PRINT via an output builder.

    Uses a per-test ResultCache so the (expensive) post-mortem is only
    computed when a requested field actually needs it.  Output goes
    through JsonOutput when --json was given, else PrintOutput.
    """

    for test in tests:
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out tests that have not been run
            result_cache = ResultCache(logger, test, args, baseline, result_stats)

            if args.skip:
                result = result_cache.result("skip")
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue

            b = args.json and JsonOutput() or PrintOutput()

            # Print the test's name/path
            for p in args.print:
                if p is Print.path:
                    # When the path given on the command line
                    # explicitly specifies a test's output directory
                    # (found in TEST.SAVED_OUTPUT_DIRECTORY), print
                    # that; otherwise print the path to the test's
                    # directory.
                    b.add(p, (test.saved_output_directory
                              and test.saved_output_directory
                              or test.directory))
                elif p is Print.diffs:
                    # multi-line; handled after the loop
                    continue
                elif p is Print.test_directory:
                    b.add(p, test.directory)
                elif p is Print.test_status or p is Print.expected_result:
                    b.add(p, test.expected_result)
                elif p is Print.test_host_names or p is Print.host_names:
                    b.add(p, test.host_names,
                          string=lambda host_names, sep: sep + ",".join(host_names))
                elif p is Print.test_kind or p is Print.kind:
                    b.add(p, test.kind)
                elif p is Print.test_name:
                    b.add(p, test.name)
                elif p is Print.output_directory:
                    b.add(p, test.output_directory)
                elif p is Print.result:
                    b.add(p, result_cache.result(p))
                elif p is Print.issues:
                    b.add(p, result_cache.result(p).issues)
                elif p is Print.testing_directory:
                    b.add(p, test.testing_directory())
                elif p is Print.saved_output_directory:
                    b.add(p, test.saved_output_directory)
                elif p is Print.test_scripts or p is Print.scripts:
                    b.add(p, [{ "host": h, "script": s}
                              for h, s in test.host_script_tuples],
                          string=lambda scripts, sep: sep + ",".join([script["host"] + ":" + script["script"]
                                                                     for script in scripts]))
                elif p is Print.baseline_directory:
                    b.add(p, baseline and test.name in baseline
                          and baseline[test.name].directory or None)
                elif p is Print.baseline_output_directory:
                    b.add(p, baseline and test.name in baseline
                          and baseline[test.name].output_directory or None)
                elif p is Print.start_time:
                    # timestamps are scraped out of the test's debug log
                    b.add(p, result_cache.grub(r"starting debug log at (.*)$"))
                elif p is Print.end_time:
                    b.add(p, result_cache.grub(r"ending debug log at (.*)$"))
                elif p is Print.runtime:
                    b.add(p, result_cache.grub(r": stop testing .* after (.*) second", float))
                elif p is Print.boot_time:
                    b.add(p, result_cache.grub(r": stop booting domains after (.*) second", float))
                elif p is Print.script_time:
                    b.add(p, result_cache.grub(r": stop running scripts .* after (.*) second", float))
                elif p is Print.total_time:
                    b.add(p, result_cache.grub(r": stop processing test .* after (.*) second", float))
                else:
                    raise Exception("unhandled print option %s" % p)

            if Print.diffs in args.print:
                result = result_cache.result(Print.diffs)
                for domain in result.diffs:
                    b.add(Print.diffs, domain, result.diffs[domain],
                          string=(lambda diff, sep: diff
                                  and (sep and "\n" or "") + "\n".join(diff)
                                  or ""))

            b.flush()
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    """Run one TEST end-to-end: filter, back up old output, boot domains,
    run the test scripts, then post-mortem and record the result.

    Early `return`s (ignored, skipped, mkdir failure, boot timeout)
    leave no RESULT file behind; only a test that ran to completion is
    post-mortemed and recorded in TEST_STATS/RESULT_STATS.
    """

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Be lazy when gathering the results, don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.
        old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                 quick=True, finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.
        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.
        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory, "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                # optionally capture network traffic for the run
                with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                  [test_domain.domain for test_domain in test_domains.values()],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(test.output_directory,
                                                  test_domain.domain.host_name + ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error("**** timeout out while running script %s ****", script)

                    finally:

                        # Close the redirected test-result log files.
                        # NOTE(review): the second loop below closes
                        # each logfile again before disconnecting;
                        # close() on an already-closed file is a no-op.
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            logfile.close()

                        # Always disconnect from the test domains.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

        # The test finished.  Aborts such as a failed boot, or a timeout,
        # will skip all the below.
        result = post.mortem(test, args, domain_prefix=domain_prefix,
                             finished=True, update=True)
        logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                    result.issues and " ", result.issues, suffix)

        # Since the test finished (resolved in POSIX terminology),
        # emit enough JSON to fool scripts like pluto-testlist-scan.sh.
        #
        # A test that timed-out or crashed, isn't considered resolved.
        #
        # A more robust way of doing this would be to mark each of the
        # console logs as complete as it is closed.
        #
        # More detailed information can be extracted from the debug.log.
        RESULT = {
            jsonutil.result.testname: test.name,
            jsonutil.result.expect: test.status,
            jsonutil.result.result: result,
            jsonutil.result.issues: result.issues,
            jsonutil.result.hosts: test.host_names,
            jsonutil.result.time: jsonutil.ftime(test_total_time.start),
            jsonutil.result.runtime: round(test_runtime.seconds(), 1),
            jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
            jsonutil.result.script_time: round(test_script_time.seconds(), 1),
            jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        }
        j = jsonutil.dumps(RESULT)
        logger.info("filling '%s' with json: %s", test.result_file(), j)
        with open(test.result_file(), "w") as f:
            f.write(j)
            f.write("\n")

        test_stats.add(test, "tests", str(result))
        result_stats.add_result(result, old_result)
        test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
        result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def results(logger, tests, baseline, args, result_stats):
    """Rebuild/print results for TESTS, publish them, and return an exit code.

    Exit codes: 0 on success (or always, with --exit-ok), 125 when any
    test was UNRESOLVED ('git bisect' magic for don't know), 1 when
    any test failed.
    """

    failures = 0
    unresolved = 0
    passed = 0

    nr = 0
    for test in tests:
        nr = nr + 1
        publish.json_status(logger, args,
                            "rebuilding %s (test %d of %d)" % (test.name, nr, len(tests)))

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args, baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()

            if args.skip:
                # only skip when the result column was requested
                if printer.Print.RESULT in args.print \
                and skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            # tally for the exit code computed below
            if result.resolution in [post.Resolution.PASSED,
                                     post.Resolution.UNTESTED,
                                     post.Resolution.UNSUPPORTED]:
                passed = passed + 1
            elif result.resolution in [post.Resolution.UNRESOLVED]:
                unresolved = unresolved + 1
            else:
                failures = failures + 1

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and test in baseline \
            and not result.issues.crashed():
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_results(logger, args)
    publish.json_summary(logger, args)

    publish.json_status(logger, args, "finished")

    # exit code
    if args.exit_ok:
        return 0
    elif unresolved:
        return 125 # 'git bisect' magic for don't know
    elif failures:
        return 1
    else:
        return 0
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    """Run one TEST end-to-end: filter, back up old output, boot domains,
    run the test scripts, then post-mortem and record the result.

    Early `return`s (ignored, skipped, mkdir failure, boot timeout)
    leave no RESULT file behind; only a test that ran to completion is
    post-mortemed and recorded in TEST_STATS/RESULT_STATS.
    """

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Be lazy when gathering the results, don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.
        old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                 quick=True, finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.
        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.
        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory, "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                # optionally capture network traffic for the run
                with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                  [test_domain.domain for test_domain in test_domains.values()],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(test.output_directory,
                                                  test_domain.domain.host_name + ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error("**** timeout out while running script %s ****", script)

                    finally:

                        # Close the redirected test-result log files.
                        # NOTE(review): the second loop below closes
                        # each logfile again before disconnecting;
                        # close() on an already-closed file is a no-op.
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            logfile.close()

                        # Always disconnect from the test domains.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

        # The test finished.  Aborts such as a failed boot, or a timeout,
        # will skip all the below.
        result = post.mortem(test, args, domain_prefix=domain_prefix,
                             finished=True, update=True)
        logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                    result.issues and " ", result.issues, suffix)

        # Since the test finished (resolved in POSIX terminology),
        # emit enough JSON to fool scripts like pluto-testlist-scan.sh.
        #
        # A test that timed-out or crashed, isn't considered resolved.
        #
        # A more robust way of doing this would be to mark each of the
        # console logs as complete as it is closed.
        #
        # More detailed information can be extracted from the debug.log.
        RESULT = {
            jsonutil.result.testname: test.name,
            jsonutil.result.expect: test.status,
            jsonutil.result.result: result,
            jsonutil.result.issues: result.issues,
            jsonutil.result.hosts: test.host_names,
            jsonutil.result.time: jsonutil.ftime(test_total_time.start),
            jsonutil.result.runtime: round(test_runtime.seconds(), 1),
            jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
            jsonutil.result.script_time: round(test_script_time.seconds(), 1),
            jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        }
        j = jsonutil.dumps(RESULT)
        logger.info("filling '%s' with json: %s", test.result_file(), j)
        with open(test.result_file(), "w") as f:
            f.write(j)
            f.write("\n")

        test_stats.add(test, "tests", str(result))
        result_stats.add_result(result, old_result)
        test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
        result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    """Run one TEST end-to-end and ALWAYS post-mortem it.

    Unlike earlier variants, the post-mortem and stats update live in
    a try/finally so they run even when the boot or the scripts abort;
    a RESULT file is only written for runs that actually resolved.
    """

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # always perform post mortem on the test directory.
        try:

            ignored, details = ignore.test(logger, args, test)
            if ignored:
                # If there is any pre-existing output move it to
                # backup.  Otherwise it looks like the test was run
                # when it wasn't (and besides, the output is no longer
                # applicable).
                #
                # The isdir() test followed by a simple move, while
                # racy, should be good enough.
                if os.path.isdir(test.output_directory):
                    logger.info("moving '%s' to '%s'", test.output_directory,
                                backup_directory)
                    os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                    os.rename(test.output_directory, backup_directory)
                result_stats.add_ignored(test, ignored)
                test_stats.add(test, "ignored")
                logger.info("%s %s ignored (%s) %s",
                            prefix, test_prefix, details, suffix)
                return

            # Be lazy when gathering the results, don't run the
            # sanitizer or diff.  Let post.mortem figure out if the
            # test finished.
            old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                     quick=True)
            if skip.result(logger, args, old_result):
                logger.info("%s %s skipped (previously %s) %s",
                            prefix, test_prefix, old_result, suffix)
                test_stats.add(test, "skipped")
                result_stats.add_skipped(old_result)
                return

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....",
                            prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory; if it already exists,
            # move any contents to BACKUP/.  Do it file-by-file so
            # that, at no point, the OUTPUT/ directory is missing
            # (having an OUTPUT/ directory implies the test was
            # started).
            #
            # Don't create the path.  If the parent directory is
            # missing, this will fail.
            #
            # By backing up each test just before it is started, a
            # trail of what tests were attempted during each run is
            # created.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.
            try:
                os.mkdir(test.output_directory)
            except FileNotFoundError:
                # Bail, something is messed up (for instance the parent directory doesn't exist).
                return
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.
            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory,
                                       "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        logger.exception("timeout while booting domains")
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        return

                # Run the scripts directly
                with logger.time(
                        "running scripts %s",
                        " ".join(("%s:%s" % (host, script))
                                 for host, script in test.host_script_tuples)) as test_script_time:

                    # optionally capture network traffic for the run
                    with tcpdump.Dump(
                            logger, domain_prefix, test.output_directory,
                            [test_domain.domain
                             for test_domain in test_domains.values()],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write(
                                        "\n*** exception running script %s ***\n%s"
                                        % (script, str(e)))
                                    raise

                            # mark every console log as finished
                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(
                                    post.DONE)

                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout gets
                            # treated as unresolved.  Timeouts
                            # shouldn't occur so human intervention
                            # is required.
                            logger.error(
                                "**** timeout out while running test script %s ****", script)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix,
                                     update=True)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.
            if not os.path.isfile(test.result_file()) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                    jsonutil.result.total_time: round(test_runtime.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", test.result_file(), j)
                with open(test.result_file(), "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def results(logger, tests, baseline, args, result_stats):
    """Print a one-line report for each test in TESTS.

    For every test, the fields selected by ARGS.PRINT (name, paths,
    result, scripts, ...) are printed on a single line, optionally
    followed by the test's diffs.  Outcomes accumulate in RESULT_STATS.
    BASELINE, when not None, is handed to post.mortem() for baseline
    comparison.
    """
    for test in tests:
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, include_ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                if not include_ignored:
                    continue

            # Filter out tests that have not been run.  The (expensive)
            # post-mortem is only performed when the result is actually
            # going to be printed.
            result = None
            if not ignored and Print.result in args.print:
                result = post.mortem(test, args,
                                     baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     test_finished=None,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if skip.result(logger, args, result):
                    continue

            if not result:
                result_stats.add_ignored(test, str(result))
            else:
                result_stats.add_result(result)

            # SEP separates fields on the report line: empty before the
            # first field, then a space (or "," inside a list field).
            sep = ""

            # Print the test's name/path
            if not args.prefix:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix is Prefix.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif args.prefix is Prefix.test_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif args.prefix is Prefix.output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignored:
                print(sep, end="")
                print("ignored", ignored, end="")
                sep = " "

            # Print the remaining requested fields; diffs are handled
            # separately after the report line is terminated.
            for p in args.print:
                if p in [Print.diffs]:
                    continue
                elif p is Print.directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif p is Print.expected_result:
                    print(sep, end="")
                    print(test.expected_result, end="")
                    sep = " "
                elif p is Print.full_name:
                    print(sep, end="")
                    print(test.full_name, end="")
                    sep = " "
                elif p is Print.host_names:
                    for name in test.host_names:
                        print(sep, end="")
                        print(name, end="")
                        sep = ","
                    sep = " "
                elif p is Print.kind:
                    print(sep, end="")
                    print(test.kind, end="")
                    sep = " "
                elif p is Print.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif p is Print.output_directory:
                    print(sep, end="")
                    print(test.output_directory, end="")
                    sep = " "
                elif p is Print.result:
                    # Only printable when the post-mortem above ran.
                    if result:
                        print(sep, end="")
                        if result.errors:
                            print(result, result.errors, end="")
                        else:
                            print(result, end="")
                        sep = " "
                elif p is Print.sanitize_directory:
                    print(sep, end="")
                    print(test.sanitize_directory, end="")
                    sep = " "
                elif p is Print.saved_output_directory:
                    print(sep, end="")
                    print(test.saved_output_directory, end="")
                    sep = " "
                elif p is Print.scripts:
                    for host, script in test.host_script_tuples:
                        print(sep, end="")
                        print("%s:%s" % (host, script), end="")
                        sep = ","
                    sep = " "
                else:
                    print()
                    print("unknown print option", p)

            # Terminate the report line.
            print()

            if Print.diffs in args.print and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            # Flush so output appears as each test is processed.
            sys.stdout.flush()
def results(logger, tests, baseline, args, result_stats):
    """Report on each test in TESTS via a JSON or plain-text builder.

    For every test, the fields selected by ARGS.PRINT are fed to a
    JsonOutput (when ARGS.JSON) or PrintOutput builder and flushed
    per-test.  Expensive values (result, issues, times grubbed from the
    debug log) are obtained lazily through a per-test ResultCache.
    Outcomes accumulate in RESULT_STATS; BASELINE feeds the
    baseline_directory / baseline_output_directory fields.
    """
    for test in tests:
        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out tests that have not been run.  The cache
            # defers the post-mortem until a field actually needs it.
            result_cache = ResultCache(logger, test, args, baseline, result_stats)

            if args.skip:
                result = result_cache.result("skip")
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue

            b = args.json and JsonOutput() or PrintOutput()

            # Print the test's name/path
            for p in args.print:
                if p is Print.path:
                    # When the path given on the command line
                    # explicitly specifies a test's output directory
                    # (found in TEST.SAVED_OUTPUT_DIRECTORY), print
                    # that; otherwise print the path to the test's
                    # directory.
                    b.add(p, (test.saved_output_directory
                              and test.saved_output_directory
                              or test.directory))
                elif p is Print.diffs:
                    # Diffs are emitted after all other fields, below.
                    continue
                elif p is Print.test_directory:
                    b.add(p, test.directory)
                elif p is Print.test_status or p is Print.expected_result:
                    b.add(p, test.expected_result)
                elif p is Print.test_host_names or p is Print.host_names:
                    b.add(p, test.host_names,
                          string=lambda host_names, sep: sep + ",".join(host_names))
                elif p is Print.test_kind or p is Print.kind:
                    b.add(p, test.kind)
                elif p is Print.test_name:
                    b.add(p, test.name)
                elif p is Print.output_directory:
                    b.add(p, test.output_directory)
                elif p is Print.result:
                    b.add(p, result_cache.result(p))
                elif p is Print.issues:
                    b.add(p, result_cache.result(p).issues)
                elif p is Print.testing_directory:
                    b.add(p, test.testing_directory())
                elif p is Print.saved_output_directory:
                    b.add(p, test.saved_output_directory)
                elif p is Print.test_scripts or p is Print.scripts:
                    b.add(p, [{"host": h, "script": s}
                              for h, s in test.host_script_tuples],
                          string=lambda scripts, sep: sep + ",".join(
                              [script["host"] + ":" + script["script"]
                               for script in scripts]))
                elif p is Print.baseline_directory:
                    b.add(p, baseline and test.name in baseline
                          and baseline[test.name].directory or None)
                elif p is Print.baseline_output_directory:
                    b.add(p, baseline and test.name in baseline
                          and baseline[test.name].output_directory or None)
                elif p is Print.start_time:
                    # Timestamps/times below are grubbed out of the
                    # test's debug log by the cache.
                    b.add(p, result_cache.grub(r"starting debug log at (.*)$"))
                elif p is Print.end_time:
                    b.add(p, result_cache.grub(r"ending debug log at (.*)$"))
                elif p is Print.runtime:
                    b.add(p, result_cache.grub(r": stop testing .* after (.*) second", float))
                elif p is Print.boot_time:
                    b.add(p, result_cache.grub(r": stop booting domains after (.*) second", float))
                elif p is Print.script_time:
                    b.add(p, result_cache.grub(r": stop running scripts .* after (.*) second", float))
                elif p is Print.total_time:
                    b.add(p, result_cache.grub(r": stop processing test .* after (.*) second", float))
                else:
                    raise Exception("unhandled print option %s" % p)

            if Print.diffs in args.print:
                result = result_cache.result(Print.diffs)
                for domain in result.diffs:
                    b.add(Print.diffs, domain, result.diffs[domain],
                          string=(lambda diff, sep: diff
                                  and (sep and "\n" or "") + "\n".join(diff)
                                  or ""))

            # Emit this test's accumulated fields.
            b.flush()
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_post_time = None old_result = None backup_directory = os.path.join(args.backup_directory, test.name) # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) publish.json_status(logger, args, "processing %s" % test_prefix) with logger.time("processing test %s", test_prefix): # Ignoring the test completely? # # So that there's no possible confusion over the test being # run; remove any pre-existing output. ignored, details = ignore.test(logger, args, test) if ignored: # The isdir() test followed by a simple move, while # racy, should be good enough. if os.path.isdir(test.output_directory): logger.info("moving '%s' to '%s'", test.output_directory, backup_directory) os.makedirs(os.path.dirname(backup_directory), exist_ok=True) os.rename(test.output_directory, backup_directory) result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Skip the test, leaving old results? # # For instance, during a test re-run, skip any tests that are # passing. # # The check below compares the test and expected output, # ignoring any previous test result. This way the results are # consistent with kvmresults.py which always reflects the # current sources. 
# # - modifying the expected output so that it no longer matches # the last result is a fail # # - modifying the expected output so that it matches the last # result is a pass old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) publish.everything(logger, args, old_result) return # Running the test ... # # From now on the test will be run so need to perform post # mortem. try: if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create just the OUTPUT/ directory. # # If the directory already exists, copy the contents # BACKUP/. Do it file-by-file so that, at no point, the # OUTPUT/ directory is missing (having an OUTPUT/ # directory implies the test was started). # # Don't try to create the path. If the parent directory # is missing, this and the entire script will crash. # Someone did something nasty like deleted the parent # directory. # # By backing up each test just before it is started, # leaves a trail of what tests were attempted during a # test run. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a # test is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the # time taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is # not under testing/pluto/ this problem is avoided. try: os.mkdir(test.output_directory) except FileExistsError: logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, move it. 
os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug # log writing to that directory; include timing for this # test run. with logger.debug_time("testing %s", test_prefix, logfile=os.path.join(test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: # boot the domains with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: # Bail. Being unable to boot the domains is a # disaster. The test is UNRESOLVED. logger.exception("timeout while booting domains") return except pexpect.EOF: # Bail. Being unable to attach to the domains # is a disaster. The test is UNRESOLVED. logger.exception("eof (disconnect) while booting domains") return # Run the scripts directly with logger.time("running scripts %s", " ".join(("%s:%s" % (host, script)) for host, script in test.host_script_tuples)) as test_script_time: with tcpdump.Dump(logger, domain_prefix, test.output_directory, [test_domain.domain for test_domain in test_domains.values()], enable=args.tcpdump): try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join(test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.output(open(output, "w")) for host, script in test.host_script_tuples: if args.stop_at == script: logger.error("stopping test run at (before executing) script %s", script) break test_domain = test_domains[host] try: test_domain.read_file_run(script) except pexpect.TIMEOUT as e: # A test ending with a timeout # gets treated as a FAIL. A # timeout while running a test # script is a sign that a command # hung. 
post_timeout = "%s %s:%s" % (post.TIMEOUT, host, script) logger.warning("*** %s ***" % post_timeout) test_domain.console.child.logfile.write("%s %s %s" % (post.LHS, post_timeout, post.RHS)) break except BaseException as e: # if there is an exception, write # it to the console test_domain.console.child.logfile.write("\n%s %s %s:%s %rhs\n%s" % (post.LHS, post.EXCEPTION, host, script, post.RHS, str(e))) raise for test_domain in test_domains.values(): test_domain.console.child.logfile.write(post.DONE) finally: # Close the redirected test-result log files logger.info("closing all the test domain log files") for test_domain in test_domains.values(): outfile = test_domain.console.output() outfile.close() # Always disconnect from the test domains. logger.info("closing all the test domains") for test_domain in test_domains.values(): test_domain.close() finally: with logger.time("post-mortem %s", test_prefix): # The test finished; it is assumed that post.mortem # can deal with a crashed test. result = post.mortem(test, args, domain_prefix=domain_prefix) logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.issues and " ", result.issues, suffix) result.save() # If the test was run (a fresh run would delete RESULT) # and finished (resolved in POSIX terminology), emit # enough JSON to fool scripts like pluto-testlist-scan.sh. # # A test that timed-out or crashed, isn't considered # resolved so the file isn't created. # # XXX: this should go away. 
result_file = os.path.join(test.output_directory, "RESULT") if not os.path.isfile(result_file) \ and result.resolution.isresolved(): RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.status, jsonutil.result.result: result, jsonutil.result.issues: result.issues, jsonutil.result.hosts: test.host_names, jsonutil.result.time: jsonutil.ftime(test_runtime.start), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), } j = jsonutil.dumps(RESULT) logger.debug("filling '%s' with json: %s", result_file, j) with open(result_file, "w") as f: f.write(j) f.write("\n") # Do this after RESULT is created so it too is published. publish.everything(logger, args, result) publish.json_status(logger, args, "finished %s" % test_prefix) test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) # test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")