def __init__(self, domain_prefix, host_name, test):
    self.test = test
    # Get the domain
    self.domain = virsh.Domain(host_name=host_name, domain_prefix=domain_prefix)
    self.logger = logutil.getLogger(domain_prefix, __name__, test.name, host_name)
    # A valid console indicates that the domain is up.
    self.console = self.domain.console()
def __init__(self, directory, kind, expected_result, old_output_directory=None):
    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.expected_result = expected_result

    # The test's name is always identical to the test's directory
    # name (aka basename). However, since directory could be
    # relative (for instance "." or "./..") it first needs to be
    # converted to absolute before the basename can be extracted.
    directory = os.path.abspath(directory)
    self.name = os.path.basename(directory)
    self.full_name = "test " + self.name

    # Construct a relative directory path such that it always
    # contains the actual test directory name. For instance: "."
    # gets rewritten as ../TEST; and ".." gets rewritten as
    # "../../TEST". This prevents obtuse paths appearing in the
    # output. For instance, it prevents "kvmresult.py ../OUTPUT"
    # printing just ".. passed".
    self.directory = os.path.join(os.path.relpath(os.path.dirname(directory)), self.name)

    # Need to juggle two directories: first, there is the
    # directory containing the OLD output from a previous test
    # run; and second, there is the directory that will contain
    # the [NEW] output from the next test run. It makes a
    # difference when an old output directory is explicitly
    # specified. For instance, "kvmresults.py test/OUTPUT.OLD/"
    # should output the results for that directory and not
    # "test/OUTPUT/".
    self.output_directory = os.path.join(self.directory, "OUTPUT")
    self.result_file = os.path.join(self.output_directory, "RESULT")
    self.old_output_directory = old_output_directory or self.output_directory

    # will be filled in later
    self.domains = None
    self.initiators = None
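# The relative-path juggling above is subtle, so here is a minimal,
# self-contained sketch of the same idea (the directory names in the
# example comments are hypothetical). It shows how "." and ".." get
# rewritten so the displayed path always ends in the test's name.
import os.path

def display_directory(directory):
    # Convert to absolute first so basename() works on "." and "./..".
    directory = os.path.abspath(directory)
    name = os.path.basename(directory)
    # Re-join the (relative) parent with the test name so the result
    # always ends in the test's directory name.
    return os.path.join(os.path.relpath(os.path.dirname(directory)), name)

# When run from inside, say, testing/pluto/basic-pluto-01:
#   display_directory(".")  -> "../basic-pluto-01"
#   display_directory("..") -> "../../pluto"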
def __init__(self, domain_name, test):
    self.full_name = "test " + test.name + " " + domain_name
    self.test = test
    # Get the domain
    self.domain = virsh.Domain(domain_name)
    self.logger = logutil.getLogger(__name__, test.name, domain_name)
    # A valid console indicates that the domain is up.
    self.console = self.domain.console()
def __init__(self, domain_name):
    # Use the term "domain" just like virsh
    self.name = domain_name
    self.virsh_console = None
    # Logger?
    self.logger = logutil.getLogger(__name__, self.name)
    self.debug_handler = None
    self.logger.debug("domain created")
def main():
    parser = argparse.ArgumentParser(description="Run tests")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info(" directories: %s", args.directories)
    logger.info(" verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix=" ")
    result_stats.log_details(logger.info, header="final test details:", prefix=" ")
    test_stats.log_summary(logger.info, header="final test stats:", prefix=" ")
    result_stats.log_summary(logger.info, header="final test results:", prefix=" ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - timing.START_TIME)

    return exit_code
def main():
    parser = argparse.ArgumentParser(description="Run tests")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info(" directories: %s", args.directories)
    logger.info(" verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix=" ")
    result_stats.log_details(logger.info, header="final test details:", prefix=" ")
    test_stats.log_summary(logger.info, header="final test stats:", prefix=" ")
    result_stats.log_summary(logger.info, header="final test results:", prefix=" ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - timing.START_TIME)

    return exit_code
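# Both main() variants above select a log function with the
# pre-ternary "and/or" idiom. A minimal sketch of that selection,
# assuming a stock logging setup rather than the project's logutil:
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("example")

verbose = 0
# With verbose == 0 the "and" short-circuits to 0 and the "or" falls
# through to logger.debug; any non-zero count selects logger.info.
log = verbose and logger.info or logger.debug
log("final stat details sent to %s", verbose and "info" or "debug")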
def _process_test_queue(domain_prefix, test_queue, args, done, result_stats, boot_executor):
    logger = logutil.getLogger(domain_prefix, __name__)
    try:
        while True:
            task = test_queue.get(block=False)
            _process_test(domain_prefix, task.test, args, result_stats,
                          task.count, task.total, boot_executor)
    except queue.Empty:
        pass
    finally:
        done.release()
def _process_test_queue(domain_prefix, test_queue, args, done, test_stats, result_stats, boot_executor):
    logger = logutil.getLogger(domain_prefix, __name__)
    try:
        while True:
            task = test_queue.get(block=False)
            _process_test(domain_prefix, task.test, args, test_stats, result_stats,
                          task.count, task.total, boot_executor)
    except queue.Empty:
        pass
    finally:
        done.release()
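# A runnable sketch of the queue-draining pattern used by
# _process_test_queue(): each worker pulls tasks with get(block=False)
# until queue.Empty, then releases a semaphore so the parent knows the
# worker is done. The names here are illustrative, not the runner's
# real API.
import queue
import threading

def worker(task_queue, done):
    try:
        while True:
            task = task_queue.get(block=False)
            print("processing", task)
    except queue.Empty:
        pass  # queue drained; fall through to the finally clause
    finally:
        done.release()

task_queue = queue.Queue()
for n in range(3):
    task_queue.put("task-%d" % n)
done = threading.Semaphore(value=0)
threading.Thread(target=worker, args=(task_queue, done)).start()
done.acquire()  # block until the worker has drained the queue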
def __init__(self, host_name, domain_prefix="", domain_name=None):
    # Use the term "domain" just like virsh
    self.prefix = domain_prefix
    self.name = domain_name or (domain_prefix + host_name)
    self.host_name = host_name
    self.virsh_console = None
    # Logger?
    self.logger = logutil.getLogger(domain_prefix, __name__, host_name)
    self.debug_handler = None
    self.logger.debug("domain created")
def run_test(test, args):
    # Lots of WITH/TRY blocks so things always clean up.

    # Time just this test
    logger = logutil.getLogger(__name__, test.name)

    with TestDomains(logger, test, args.prefix, testsuite.HOST_NAMES) as all_test_domains:
        try:
            # Python doesn't have an easy way to obtain an executor's
            # current jobs (futures) so track them using the JOBS map.
            # If there's a crash, any remaining members of JOBS are
            # canceled or killed in the finally block below. The
            # executor is cleaned up explicitly in the finally clause.
            executor = futures.ThreadPoolExecutor(max_workers=args.workers)
            jobs = {}
            logger.info("starting test")
            run_test_on_executor(executor, jobs, logger, test, all_test_domains)
            logger.info("finishing test")
        finally:
            # Control-c, timeouts, along with any other crash, and
            # even a normal exit, all end up here!

            # Start with a list of jobs still in the queue; one or
            # more of them may be running.
            done, not_done = futures.wait(jobs, timeout=0)
            logger.debug("jobs done %s not done %s", done, not_done)

            # First: cancel all outstanding jobs (otherwise killing
            # one job would just result in the next job starting).
            # Calling cancel() on running jobs has no effect so need
            # to stop them some other way; ulgh!
            not_canceled = set()
            for job in not_done:
                logger.info("trying to cancel job %s on %s", job, jobs[job])
                if job.cancel():
                    logger.info("job %s on %s canceled", job, jobs[job])
                else:
                    logger.info("job %s on %s did not cancel", job, jobs[job])
                    not_canceled.add(job)

            # Second: cause any un-canceled jobs (presumably they are
            # running) to crash. The crash() call, effectively, pulls
            # the rug out from under the code interacting with the
            # domain.
            for job in not_canceled:
                logger.info("trying to crash job %s on %s", job, jobs[job])
                jobs[job].crash()

            # finally shutdown the executor; it will reap all the
            # crashed jobs.
            executor.shutdown()
def run_test(test, max_workers=1):
    # Lots of WITH/TRY blocks so things always clean up.

    # Time just this test
    logger = logutil.getLogger(__name__, test.name)
    logger.info("starting test")

    with TestDomains(logger, test, testsuite.DOMAIN_NAMES) as all_test_domains:
        try:
            # Python doesn't have an easy way to obtain an executor's
            # current jobs (futures) so track them using the JOBS map.
            # If there's a crash, any remaining members of JOBS are
            # canceled or killed in the finally block below. The
            # executor is cleaned up explicitly in the finally clause.
            executor = futures.ThreadPoolExecutor(max_workers=max_workers)
            jobs = {}
            run_test_on_executor(executor, jobs, logger, test, all_test_domains)
        finally:
            # Control-c, timeouts, along with any other crash, and
            # even a normal exit, all end up here!
            logger.info("finishing test")

            # Start with a list of jobs still in the queue; one or
            # more of them may be running.
            done, not_done = futures.wait(jobs, timeout=0)
            logger.debug("jobs done %s not done %s", done, not_done)

            # First: cancel all outstanding jobs (otherwise killing
            # one job would just result in the next job starting).
            # Calling cancel() on running jobs has no effect so need
            # to stop them some other way; ulgh!
            not_canceled = set()
            for job in not_done:
                logger.info("trying to cancel job %s on %s", job, jobs[job])
                if job.cancel():
                    logger.info("job %s on %s canceled", job, jobs[job])
                else:
                    logger.info("job %s on %s did not cancel", job, jobs[job])
                    not_canceled.add(job)

            # Second: cause any un-canceled jobs (presumably they are
            # running) to crash. The crash() call, effectively, pulls
            # the rug out from under the code interacting with the
            # domain.
            for job in not_canceled:
                logger.info("trying to crash job %s on %s", job, jobs[job])
                jobs[job].crash()

            # finally shutdown the executor; it will reap all the
            # crashed jobs.
            executor.shutdown()
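# run_test() tracks futures in a JOBS dict because an executor does
# not expose its pending futures. A minimal sketch of the same
# cancel-what-you-can pattern using only concurrent.futures; the job
# payload is a stand-in for the real per-domain work:
import time
from concurrent import futures

def slow(name):
    time.sleep(1)
    return name

executor = futures.ThreadPoolExecutor(max_workers=1)
jobs = {executor.submit(slow, name): name for name in ("east", "west")}
try:
    # timeout=0 returns immediately with the current done/not-done split.
    done, not_done = futures.wait(jobs, timeout=0)
    for job in not_done:
        # cancel() only succeeds for jobs still sitting in the queue; a
        # job that is already running reports False and must be stopped
        # some other way (hence the crash() calls above).
        print(jobs[job], "canceled" if job.cancel() else "did not cancel")
finally:
    # shutdown() waits for any still-running job to finish.
    executor.shutdown()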
def __init__(self, test_directory, kind, expected_result,
             saved_test_output_directory=None,
             testsuite_output_directory=None):
    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.expected_result = expected_result

    # The test's name is always identical to the test directory's
    # name (aka basename). However, since TEST_DIRECTORY could be
    # relative (for instance "." or "./..") it first needs to be
    # made absolute before the basename can be extracted.
    test_directory = os.path.abspath(test_directory)
    # The test's name is the same as the directory's basename.
    self.name = os.path.basename(test_directory)
    self.full_name = "test " + self.name

    # Construct the test's relative directory path such that it
    # always contains the test directory name (i.e., the test
    # name) as context. For instance: "." gets rewritten as
    # ../<test>; and ".." gets rewritten as "../../<test>". This
    # ensures that displayed paths always include some context.
    # For instance, given "kvmresult.py .", "../<test> passed"
    # (and not ". passed") will be displayed.
    self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name)

    # Directory where the next test run's output should be
    # written. If a common testsuite output directory was
    # specified, use that.
    self.output_directory = (testsuite_output_directory
                             and os.path.join(testsuite_output_directory, self.name)
                             or os.path.join(self.directory, "OUTPUT"))
    self.result_file = os.path.join(self.output_directory, "RESULT")

    # Directory containing saved output from a previous test run.
    # If the test's output directory was explicitly specified, say
    # as a parameter to kvmrunner.py viz:
    #
    #   kvmresults.py testing/pluto/<test>/OUTPUT.OLD
    #   kvmresults.py testing/pluto/OUTPUT/<test>
    #
    # then that directory, and not the next output-directory, will
    # be passed in and saved here. Otherwise it is None, and the
    # OUTPUT_DIRECTORY should be used.
    self.saved_output_directory = saved_test_output_directory

    # will be filled in later
    self.domains = None
    self.initiators = None
def __init__(self, command, hostname=None, username=None, logger=None):
    # Need access to HOSTNAME.
    self.logger = logger or logutil.getLogger(__name__, hostname)
    self.basename = None
    self.hostname = hostname
    self.username = username
    self.prompt = compile_prompt(self.logger, hostname=hostname, username=username)
    # Create the child: configure -ve timeout parameters to act
    # like poll, and give all methods an explicit default of
    # TIMEOUT seconds; leave searchwindowsize set to the infinite
    # default so that expect patterns do not mysteriously fail.
    self.logger.debug("spawning '%s'", command)
    self.child = pexpect.spawnu(command, timeout=0)
    # This crashes inside of pexpect!
    # self.logger.debug("child is '%s'", self.child)
    # route low level output to the logger
    self.child.logfile_read = Debug(self.logger, "read <<%s>>>")
    self.child.logfile_send = Debug(self.logger, "send <<%s>>>")
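# pexpect's logfile_read/logfile_send hooks accept any file-like
# object. A minimal sketch of a Debug adapter shaped like the one used
# above - it forwards everything written to it into a logger. This is
# an assumed reconstruction, not the project's actual Debug class.
import logging

class Debug:
    def __init__(self, logger, message):
        self.logger = logger
        self.message = message  # e.g. "read <<%s>>>"
    def write(self, text):
        self.logger.debug(self.message, text.rstrip())
    def flush(self):
        pass  # nothing is buffered; required by the file-like protocol

logging.basicConfig(level=logging.DEBUG)
Debug(logging.getLogger("console"), "read <<%s>>>").write("login: ")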
def __init__(self, directory, kind, expected_result):
    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.expected_result = expected_result
    # name must match the directory's basename; since directory
    # could be ".", need to first convert it to an absolute path.
    self.name = os.path.basename(os.path.abspath(directory))
    self.full_name = "test " + self.name
    # avoid "." as a directory, construct the sub-directory paths
    # using the parent's directory (don't abspath or relpath).
    self.directory = os.path.relpath(directory)
    if self.directory == ".":
        self.directory = os.path.join("..", self.name)
    self.output_directory = os.path.join(self.directory, "OUTPUT")
    self.result_file = os.path.join(self.output_directory, "RESULT")
    # will be filled in later
    self.domains = None
    self.initiators = None
def _run_test(domain_prefix, test, args, boot_executor):
    # Time just this test
    logger = logutil.getLogger(domain_prefix, __name__, test.name)
    with TestDomains(domain_prefix, testsuite.HOST_NAMES, test, logger) as all_test_domains:
        logger.info("starting test")

        test_domains = set()
        for host_name in test.host_names:
            test_domains.add(all_test_domains[host_name])
        logger.debug("test domains: %s", strset(test_domains))

        unused_domains = set()
        for test_domain in all_test_domains.values():
            if test_domain not in test_domains:
                unused_domains.add(test_domain)
        logger.debug("unused domains: %s", strset(unused_domains))

        boot_test_domains(logger, test_domains, unused_domains, boot_executor)

        # re-direct the test-result log file
        for test_domain in test_domains:
            output = os.path.join(test.output_directory,
                                  test_domain.domain.host_name + ".console.verbose.txt")
            test_domain.console.output(open(output, "w"))

        # Run the scripts directly
        logger.info("running scripts: %s",
                    " ".join(str(script) for script in test.scripts))
        for script in test.scripts:
            test_domain = all_test_domains[script.host_name]
            test_domain.read_file_run(script.script)

        # Close the redirected test-result log files
        for test_domain in test_domains:
            logfile = test_domain.console.output()
            logfile.close()

        logger.info("finishing test")
def main():
    parser = argparse.ArgumentParser(
        description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately). If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help="Use the previously generated '.console.txt' and '.console.diff' files")
    parser.add_argument("--quick-sanitize", action="store_true",
                        help="Use the previously generated '.console.txt' file")
    parser.add_argument("--quick-diff", action="store_true",
                        help="Use the previously generated '.console.diff' file")
    parser.add_argument("--update", action="store_true",
                        help="Update the '.console.txt' and '.console.diff' files")
    parser.add_argument("--update-sanitize", action="store_true",
                        help="Update the '.console.txt' file")
    parser.add_argument("--update-diff", action="store_true",
                        help="Update the '.console.diff' file")

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    # parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-output-directory", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+",
                        help="Either a testsuite (only one) or test directory")
    # Note: this argument serves as documentation only. The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.print_output_directory = args.print_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Is the last argument some sort of baseline? If it is, pre-load
    # it.
    #
    # XXX: Should also support something like --baseline-testsuite and
    # --baseline-output parameters.
    baseline = None
    if len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline. A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args.directories[-1], args,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    for test in tests:
        # Produce separate runtimes for each test.
        with logutil.TIMER:
            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            # Print the test's name/path
            if not args.print_directory and not args.print_name and not args.print_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.print_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.print_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.print_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                if result.errors:
                    print(result, result.errors, end="")
                else:
                    print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
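# The -v cascade above enables progressively more output as the count
# grows. A stripped-down, runnable sketch of the same counter idiom:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--print-name", action="store_true")
parser.add_argument("--list-untested", action="store_true")
args = parser.parse_args(["-vv"])

v = 0
args.print_name = args.print_name or args.verbose > v  # enabled at -v
v += 1
args.list_untested = args.list_untested or args.verbose > v  # at -vv
print(args.print_name, args.list_untested)  # -vv enables both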
def main():
    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(
        description="Connect to and run a shell command on a virtual machine domain",
        epilog="If no command or file is specified an interactive shell is created. SIGUSR1 will dump all thread stacks")

    parser.add_argument("--timeout", type=argutil.timeout, default=None,
                        help=("maximum runtime for the command"
                              "; -1 for no timeout"
                              " (default: no timeout)"))
    argutil.add_redirect_argument(parser, "re-direct console output from stdout to %(metavar)s",
                                  "--output", "-o",
                                  default=sys.stdout, metavar="FILE")

    parser.add_argument("--chdir", default=None, action="store", metavar="PATH",
                        help=("first change directory to %(metavar)s on the remote"
                              " domain and update prompt-match logic to expect"
                              " that directory"
                              "; an absolute %(metavar)s is used unmodified"
                              "; a relative %(metavar)s, which is interpreted"
                              " as relative to the current local working directory"
                              ", is converted to an absolute remote path before use"
                              " (default: leave directory unchanged)"))

    parser.add_argument("--boot", default=None, action="store",
                        type=Boot, choices=[e for e in Boot],
                        help=("force the domain to boot"
                              "; 'cold': power-off any existing domain"
                              "; 'warm': reboot any existing domain"
                              " (default: leave existing domain running)"))
    parser.add_argument("--shutdown", default=False, action="store_true",
                        help=("on-completion shut down the domain"
                              " (default: leave the domain running)"))

    parser.add_argument("--mode", default=None,
                        choices=set(["interactive", "batch"]),
                        help=("enter mode"
                              " (default: if there is no command enter interactive mode)"))

    parser.add_argument("--host-name", default=None,
                        help="The virtual machine's host name")

    parser.add_argument("domain", action="store", metavar="DOMAIN",
                        help="virtual machine (domain) to connect to")

    parser.add_argument("command", nargs=argparse.REMAINDER, metavar="COMMAND",
                        help="run shell command non-interactively; WARNING#1: this simply concatenates remaining arguments with spaces; WARNING#2: this does not try to escape arguments before passing them onto the domain's shell")

    logutil.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmsh")

    # Get things started
    domain = virsh.Domain(domain_name=args.domain, host_name=args.host_name)

    # Find a reason to log-in and interact with the console.
    batch = args.mode == "batch" or args.command
    interactive = args.mode == "interactive" or (not args.command and args.boot is None and not args.shutdown)

    # Get the current console, this will be None if the machine is
    # shutoff (and forced to none if a cold boot)
    console = domain.console()
    if args.boot is Boot.cold and console:
        remote.shutdown(domain, console)
        console = None

    status = 0

    if args.boot and not (interactive or batch):
        console = remote.boot_to_login_prompt(domain, console)
    elif interactive or batch:
        if console:
            remote.login(domain, console)
        else:
            console = remote.boot_and_login(domain, console)

        if args.chdir and os.path.isabs(args.chdir):
            chdir = args.chdir
        elif args.chdir:
            chdir = domain.guest_path(console, host_path=args.chdir)
        else:
            chdir = None
        if chdir:
            domain.logger.info("'cd' to %s", chdir)
            console.chdir(chdir)

        if args.command:
            if interactive:
                logger.info("info: option --output disabled as it makes pexpect crash when in interactive mode.")
            else:
                console.redirect_output(args.output)
            console.run("")
            status = console.run(' '.join(args.command), timeout=args.timeout)
            print()

        if interactive:
            print()
            if args.debug:
                logger.info("info: pexpect ignores --debug in interactive mode!")
            logger.info("Escape character is ^]")
            # Hack so that the prompt appears
            console.redirect_output(sys.stdout)
            console.run("")
            console.redirect_output(None)
            # Get this terminal's properties.
            columns, rows = os.get_terminal_size()
            # Normal mode
            console.stty_sane(term=os.getenv("TERM"), rows=rows, columns=columns)
            console.interact()

    if args.shutdown:
        shutdown_status = remote.shutdown(domain)
        status = status or shutdown_status

    sys.exit(status)
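# kvmsh feeds argparse an Enum through type= and choices=. A minimal
# sketch of that wiring; the Boot values come from the help text
# above, but this Enum definition itself is an assumption:
import argparse
import enum

class Boot(enum.Enum):
    cold = "cold"
    warm = "warm"
    def __str__(self):
        return self.value  # so --help shows "cold"/"warm", not Boot.cold

parser = argparse.ArgumentParser()
parser.add_argument("--boot", default=None, action="store",
                    type=Boot, choices=[e for e in Boot],
                    help="force the domain to boot")
args = parser.parse_args(["--boot", "cold"])
assert args.boot is Boot.cold  # Boot("cold") looks the member up by value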
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    suffix = "******"

    test_stats.add(test, "total")

    # Would the number of tests to be [re]run be better?
    test_lapsed_time = timing.Lapsed()
    test_prefix = "%s %s (test %d of %d)" % (suffix, test.name, test_count, tests_count)

    ignored, include_ignored, details = ignore.test(logger, args, test)
    if ignored and not include_ignored:
        result_stats.add_ignored(test, ignored)
        test_stats.add(test, "ignored")
        # No need to log all the ignored tests when an explicit
        # sub-set of tests is being run. For instance, when running
        # just one test.
        if not args.test_name:
            logger.info("%s ignored (%s)", test_prefix, details)
        return

    # Be lazy with gathering the results, don't run the sanitizer or
    # diff.
    #
    # XXX: There is a bug here where the only difference is white
    # space. The test will show up as failed when it previously
    # showed up as a whitespace pass.
    #
    # The presence of the RESULT file is a proxy for detecting that
    # the test was incomplete.
    old_result = post.mortem(test, args, test_finished=None,
                             skip_diff=True, skip_sanitize=True)
    if skip.result(logger, args, old_result):
        logger.info("%s skipped (previously %s)", test_prefix, old_result)
        test_stats.add(test, "skipped")
        result_stats.add_skipped(old_result)
        return

    if old_result:
        test_stats.add(test, "tests", "retry")
        logger.info("%s started (previously %s) ....", test_prefix, old_result)
    else:
        test_stats.add(test, "tests", "try")
        logger.info("%s started ....", test_prefix)
    test_stats.add(test, "tests")

    # Move the contents of the existing OUTPUT directory to
    # BACKUP_DIRECTORY. Do it file-by-file so that, at no point, the
    # directory is empty.
    #
    # By moving each test just before it is started a trail of what
    # tests were attempted at each run is left.
    #
    # XXX: During boot, swan-transmogrify runs "chcon -R
    # testing/pluto". Of course this means that each time a test is
    # added and/or a test is run (adding files under <test>/OUTPUT),
    # the boot process (and consequently the time taken to run a test)
    # keeps increasing.
    #
    # Always moving the directory contents to the BACKUP_DIRECTORY
    # mitigates this some.
    backup_directory = None
    if os.path.exists(test.output_directory):
        backup_directory = os.path.join(args.backup_directory, test.name)
        logger.info("moving contents of '%s' to '%s'",
                    test.output_directory, backup_directory)
        # Copy "empty" OUTPUT directories too.
        args.dry_run or os.makedirs(backup_directory, exist_ok=True)
        for name in os.listdir(test.output_directory):
            src = os.path.join(test.output_directory, name)
            dst = os.path.join(backup_directory, name)
            logger.debug("moving '%s' to '%s'", src, dst)
            args.dry_run or os.replace(src, dst)

    debugfile = None
    result = None

    # At least one iteration; above will have filtered out skips and
    # ignored
    for attempt in range(args.attempts):
        test_stats.add(test, "attempts")

        # Create the OUTPUT directory.
        try:
            if not args.dry_run:
                os.mkdir(test.output_directory)
            elif os.path.exists(test.output_directory):
                raise FileExistsError()
        except FileExistsError:
            # On first attempt, the OUTPUT directory will be empty
            # (see above) so no need to save.
            if attempt > 0:
                backup_directory = os.path.join(test.output_directory, str(attempt))
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                args.dry_run or os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    if os.path.isfile(src):
                        dst = os.path.join(backup_directory, name)
                        logger.debug("moving '%s' to '%s'", src, dst)
                        args.dry_run or os.replace(src, dst)

        # Start a debug log in the OUTPUT directory; include timing
        # for this specific test attempt.
        with logger.timer_stack(), logger.debug_stack(test.output_directory, "debug.log"):
            attempt_lapsed_time = timing.Lapsed()
            attempt_prefix = "%s (attempt %d of %d)" % (test_prefix, attempt + 1, args.attempts)
            logger.info("%s started ....", attempt_prefix)

            if backup_directory:
                logger.info("contents of '%s' moved to '%s'",
                            test.output_directory, backup_directory)
            backup_directory = None

            ending = "undefined"
            try:
                if not args.dry_run:
                    _run_test(domain_prefix, test, args, boot_executor)
                ending = "finished"
                result = post.mortem(test, args, test_finished=True,
                                     update=(not args.dry_run))
                if not args.dry_run:
                    # Store enough to fool the script
                    # pluto-testlist-scan.sh and leave a marker to
                    # indicate that the test finished.
                    logger.info("storing result in '%s'", test.result_file())
                    with open(test.result_file(), "w") as f:
                        f.write('"result": "%s"\n' % result)
            except pexpect.TIMEOUT as e:
                logger.exception("**** test %s timed out ****", test.name)
                ending = "timed-out"
                # Still perform post-mortem so that errors are
                # captured, but force the result to incomplete.
                result = post.mortem(test, args, test_finished=False,
                                     update=(not args.dry_run))

            # Since the OUTPUT directory exists, all paths to here
            # should have a non-null RESULT.
            test_stats.add(test, "attempts", ending, str(result))
            logger.info("%s %s%s%s after %s %s", attempt_prefix, result,
                        result.errors and " ", result.errors,
                        attempt_lapsed_time, suffix)
            if result.passed:
                break

    # Above will have set RESULT. During a control-c or crash the
    # below will not be executed.
    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    logger.info("%s %s%s%s after %s %s", test_prefix, result,
                result.errors and " ", result.errors,
                test_lapsed_time, suffix)

    test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
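# The backup step above moves OUTPUT's contents file-by-file so the
# directory itself never disappears mid-run. A self-contained sketch
# of that move, using throw-away temporary directories so it is safe
# to execute:
import os
import tempfile

output_directory = tempfile.mkdtemp()
backup_directory = tempfile.mkdtemp()
open(os.path.join(output_directory, "RESULT"), "w").close()

os.makedirs(backup_directory, exist_ok=True)
for name in os.listdir(output_directory):
    src = os.path.join(output_directory, name)
    dst = os.path.join(backup_directory, name)
    # os.replace() is atomic on the same filesystem and overwrites dst.
    os.replace(src, dst)

print(os.listdir(output_directory), os.listdir(backup_directory))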
def main():
    parser = argparse.ArgumentParser(
        description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately). If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help="Use the previously generated '.console.txt' and '.console.diff' files")
    parser.add_argument("--quick-sanitize", action="store_true",
                        help="Use the previously generated '.console.txt' file")
    parser.add_argument("--quick-diff", action="store_true",
                        help="Use the previously generated '.console.diff' file")
    parser.add_argument("--update", action="store_true",
                        help="Update the '.console.txt' and '.console.diff' files")
    parser.add_argument("--update-sanitize", action="store_true",
                        help="Update the '.console.txt' file")
    parser.add_argument("--update-diff", action="store_true",
                        help="Update the '.console.diff' file")

    parser.add_argument("--prefix-directory", action="store_true")
    parser.add_argument("--prefix-name", action="store_true")
    parser.add_argument("--prefix-output-directory", action="store_true")

    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-scripts", action="store_true")
    parser.add_argument("--print-domains", action="store_true")
    parser.add_argument("--print-initiators", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("--baseline", metavar="DIRECTORY",
                        help="a %(metavar)s containing baseline testsuite output")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), test output, or testsuite output")
    # Note: this argument serves as documentation only. The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # default to printing results
    if not args.print_scripts \
    and not args.print_result \
    and not args.print_diff \
    and not args.print_initiators \
    and not args.print_domains:
        args.print_result = True

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.prefix_directory = args.prefix_directory or args.verbose > v
    args.prefix_name = args.prefix_name or args.verbose > v
    args.print_result = args.print_result or args.verbose > v
    v += 1
    args.prefix_output_directory = args.prefix_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    args.print_scripts = args.print_scripts or args.verbose > v
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Try to find a baseline. If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite, can be more forgiving.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.baseline,
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Assume that it is baseline output only.
            if args.testing_directory:
                baseline_directory = os.path.join(args.testing_directory, "pluto")
            else:
                baseline_directory = utils.directory("..", "pluto")
            baseline = testsuite.load(logger, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline. A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.directories[-1],
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        result_stats.log_summary(stderr_log, prefix=" ")

    return 0
def __init__(self, test_directory, testing_directory,
             saved_test_output_directory, testsuite_output_directory,
             kind="kvmplutotest", expected_result="good"):
    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.expected_result = expected_result

    # The test's name is always identical to the test directory's
    # name (aka basename). However, since TEST_DIRECTORY could be
    # relative (for instance "." or "./..") it first needs to be
    # made absolute before the basename can be extracted.
    test_directory = os.path.abspath(test_directory)
    # The test's name is the same as the directory's basename.
    self.name = os.path.basename(test_directory)
    self.full_name = "test " + self.name

    # Construct the test's relative directory path such that it
    # always contains the test directory name (i.e., the test
    # name) as context. For instance: "." gets rewritten as
    # ../<test>; and ".." gets rewritten as "../../<test>". This
    # ensures that displayed paths always include some context.
    # For instance, given "kvmresult.py .", "../<test> passed"
    # (and not ". passed") will be displayed.
    self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name)

    # Directory where the next test run's output should be
    # written. If a common testsuite output directory was
    # specified, use that.
    self.output_directory = (testsuite_output_directory
                             and os.path.join(testsuite_output_directory, self.name)
                             or os.path.join(self.directory, "OUTPUT"))
    self.result_file = os.path.join(self.output_directory, "RESULT")

    # Directory containing saved output from a previous test run.
    # If the test's output directory was explicitly specified, say
    # as a parameter to kvmrunner.py viz:
    #
    #   kvmresults.py testing/pluto/<test>/OUTPUT.OLD
    #   kvmresults.py testing/pluto/OUTPUT/<test>
    #
    # then that directory, and not the next output-directory, will
    # be passed in and saved here. Otherwise it is None, and the
    # OUTPUT_DIRECTORY should be used.
    self.saved_output_directory = saved_test_output_directory

    # An instance of the test directory within a tree that
    # includes all the post-mortem sanitization scripts. If the
    # test results have been copied then this will be different to
    # test.directory.
    if testing_directory:
        self.sanitize_directory = os.path.abspath(os.path.join(testing_directory, "pluto", self.name))
    else:
        for sanitize_directory in [self.directory,
                                   utils.directory("..", "pluto", self.name)]:
            # Tentative
            self.sanitize_directory = os.path.abspath(sanitize_directory)
            self.logger.debug("is '%s' a test sanitize directory?",
                              self.sanitize_directory)
            for path in [self.sanitize_directory,
                         os.path.join(self.sanitize_directory, "..", "..", "default-testparams.sh"),
                         os.path.join(self.sanitize_directory, "..", "..", "sanitizers")]:
                if not os.path.exists(path):
                    self.logger.debug("test sanitize directory '%s' missing", path)
                    self.sanitize_directory = None
                    break
            if self.sanitize_directory:
                # found a tree containing the sanitize scripts
                break

    # will be filled in later
    self.domains = None
    self.initiators = None
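# The sanitize-directory search above probes candidate directories for
# the support files the sanitizers need. A small sketch of that
# probe-until-found loop; the required file names are copied from the
# code, the candidate list is the caller's:
import os.path

def find_sanitize_directory(candidates):
    for candidate in candidates:
        candidate = os.path.abspath(candidate)
        required = [candidate,
                    os.path.join(candidate, "..", "..", "default-testparams.sh"),
                    os.path.join(candidate, "..", "..", "sanitizers")]
        if all(os.path.exists(path) for path in required):
            return candidate
    return None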
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):
    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored and not include_ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            # No need to log all the ignored tests when an explicit
            # sub-set of tests is being run. For instance, when running
            # just one test.
            if not args.test_name:
                logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix)
            return

        # Be lazy with gathering the results, don't run the sanitizer or
        # diff.
        #
        # XXX: There is a bug here where the only difference is white
        # space. The test will show up as failed when it previously
        # showed up as a white-space pass.
        #
        # The presence of the RESULT file is a proxy for detecting that
        # the test was incomplete.
        old_result = post.mortem(test, args, test_finished=None,
                                 skip_diff=True, skip_sanitize=True)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create the OUTPUT/ directory; if it already exists, move any
        # contents to BACKUP/. Do it file-by-file so that, at no
        # point, the OUTPUT/ directory is missing (presence of OUTPUT/
        # implies the test was started).
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto". Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.
        try:
            os.mkdir(test.output_directory)
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.
        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory, "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                try:
                    # re-direct the test-result log file
                    for test_domain in test_domains.values():
                        output = os.path.join(test.output_directory,
                                              test_domain.domain.host_name + ".console.verbose.txt")
                        test_domain.console.output(open(output, "w"))

                    for host, script in test.host_script_tuples:
                        if args.stop_at == script:
                            logger.error("stopping test run at (before executing) script %s", script)
                            break
                        test_domain = test_domains[host]
                        test_domain.read_file_run(script)

                    result = post.mortem(test, args, test_finished=True, update=True)

                except pexpect.TIMEOUT as e:
                    logger.exception("**** timed out while running script %s ****", script)
                    # Still perform post-mortem so that errors are
                    # captured, but force the result to incomplete.
                    result = post.mortem(test, args, test_finished=False, update=True)

                finally:
                    # Close the redirected test-result log files
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        logfile.close()

                    # Always disconnect from the test domains.
                    logger.info("closing all test domains")
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        if logfile:
                            logfile.close()
                        test_domain.close()

        # Above will have set RESULT. Exceptions such as control-c or
        # a crash bypass this code.

        logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                    result.errors and " ", result.errors, suffix)

        # Since the test finished, emit enough JSON to fool scripts like
        # pluto-testlist-scan.sh.
        #
        # This also leaves a simple marker to indicate that the test
        # finished.
        #
        # A more robust way of doing this would be to mark each of the
        # console logs as complete as it is closed.
        #
        # More detailed information can be extracted from the debug.log.
        hosts = {}
        for host in sorted(test.host_names):
            if host in result.errors:
                hosts[host] = [error for error in result.errors[host]]
            else:
                hosts[host] = ["passed"]
        RESULT = {
            jsonutil.result.testname: test.name,
            jsonutil.result.expect: test.expected_result,
            jsonutil.result.result: str(result),
            jsonutil.result.time: jsonutil.ftime(datetime.now()),
            jsonutil.result.runtime: round(test_runtime.seconds(), 1),
            jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
            jsonutil.result.script_time: round(test_script_time.seconds(), 1),
            jsonutil.result.total_time: round(test_total_time.seconds(), 1),
            jsonutil.result.hosts: hosts,
        }
        j = jsonutil.dumps(RESULT)
        logger.info("filling '%s' with json: %s", test.result_file(), j)
        with open(test.result_file(), "w") as f:
            f.write(j)
            f.write("\n")

        test_stats.add(test, "tests", str(result))
        result_stats.add_result(result, old_result)
        test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
        result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
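# The RESULT file holds a single JSON object summarizing the run. A
# rough sketch of the emission step with the standard json module
# standing in for the project's jsonutil helper; the key names and
# values below are illustrative:
import json
import os
import tempfile
from datetime import datetime

RESULT = {
    "testname": "basic-pluto-01",
    "expect": "good",
    "result": "passed",
    "time": datetime.now().isoformat(),
    "runtime": 42.0,
    "hosts": {"east": ["passed"], "west": ["passed"]},
}
result_file = os.path.join(tempfile.mkdtemp(), "RESULT")
with open(result_file, "w") as f:
    f.write(json.dumps(RESULT))
    f.write("\n")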
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Always perform the post-mortem on the test directory.
        try:
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                # If there is any pre-existing output move it to
                # backup.  Otherwise it looks like the test was run
                # when it wasn't (and besides, the output is no longer
                # applicable).
                #
                # The isdir() test followed by a simple move, while
                # racy, should be good enough.
                if os.path.isdir(test.output_directory):
                    logger.info("moving '%s' to '%s'", test.output_directory, backup_directory)
                    os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                    os.rename(test.output_directory, backup_directory)
                result_stats.add_ignored(test, ignored)
                test_stats.add(test, "ignored")
                logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix)
                return

            # Be lazy when gathering the results, don't run the
            # sanitizer or diff.  Let post.mortem figure out if the
            # test finished.
            old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=True)
            if skip.result(logger, args, old_result):
                logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix)
                test_stats.add(test, "skipped")
                result_stats.add_skipped(old_result)
                return

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory; if it already exists,
            # move any contents to BACKUP/.  Do it file-by-file so
            # that, at no point, the OUTPUT/ directory is missing
            # (having an OUTPUT/ directory implies the test was
            # started).
            #
            # Don't create the path.  If the parent directory is
            # missing, this will fail.
            #
            # By backing up each test just before it is started, a
            # trail of what tests were attempted during each run is
            # created.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.
            try:
                os.mkdir(test.output_directory)
            except FileNotFoundError:
                # Bail, something is messed up (for instance the
                # parent directory doesn't exist).
                return
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.
            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        logger.exception("timeout while booting domains")
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        return

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 " ".join(("%s:%s" % (host, script))
                                          for host, script in test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                      [test_domain.domain for test_domain in test_domains.values()],
                                      enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(test.output_directory,
                                                      test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write(
                                        "\n*** exception running script %s ***\n%s" % (script, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(post.DONE)

                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout gets
                            # treated as unresolved.  Timeouts
                            # shouldn't occur so human intervention
                            # is required.
                            logger.error("**** timed out while running test script %s ****", script)

                        finally:

                            # Close the redirected test-result log files
                            logger.info("closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix, update=True)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.
            if not os.path.isfile(test.result_file()) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                    jsonutil.result.total_time: round(test_runtime.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", test.result_file(), j)
                with open(test.result_file(), "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
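# Sketch of a consumer for the OUTPUT/RESULT file written above: the
# file holds a single JSON object per test.  The key names used here
# ("testname", "result") are assumptions based on the
# jsonutil.result.* attributes; check jsonutil for the real spellings.

import json

def read_result(path):
    with open(path) as f:
        record = json.load(f)
    # e.g. {"testname": "...", "result": "passed", "runtime": 12.3, ...}
    return record.get("testname"), record.get("result")

# usage: name, outcome = read_result("testing/pluto/<test>/OUTPUT/RESULT")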
def main(): parser = argparse.ArgumentParser( description="list test results", epilog= "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately)." ) parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument( "--quick", action="store_true", help= ("Use the previously generated '.console.txt' and '.console.diff' files" )) parser.add_argument( "--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files")) parser.add_argument("--dump-args", action="store_true") # how to parse --print directory,saved-directory,...? parser.add_argument( "--print", action="store", default=Print(Print.path, Print.result, Print.issues), type=Print, metavar=str(Print), help= "comman separate list of attributes to print for each test; default: '%(default)s'" ) parser.add_argument( "--stats", action="store", default=Stats.summary, type=Stats, choices=[c for c in Stats], help="provide overview statistics; default: \"%(default)s\"") baseline_metavar = "BASELINE-DIRECTORY" baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results are displayed" parser.add_argument("--baseline", "-b", metavar=baseline_metavar, help=baseline_help) parser.add_argument( "--json", action="store_true", help= "output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well formed json list" ) parser.add_argument( "directories", metavar="DIRECTORY-OR-FILE", nargs="+", help= "a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'" ) # Note: this argument serves as documentation only. The RESULT # argument should consumes all remaining parameters. parser.add_argument("baseline_ignored", nargs="?", metavar=baseline_metavar, help=baseline_help) testsuite.add_arguments(parser) logutil.add_arguments(parser) skip.add_arguments(parser) ignore.add_arguments(parser) # These three calls go together args = parser.parse_args() logutil.config(args, sys.stderr) logger = logutil.getLogger("kvmresults") # The option -vvvvvvv is a short circuit for these; make # re-ordering easy by using V as a counter. v = 0 if args.dump_args: logger.info("Arguments:") logger.info(" Stats: %s", args.stats) logger.info(" Print: %s", args.print) logger.info(" Baseline: %s", args.baseline) logger.info(" Json: %s", args.json) logger.info(" Quick: %s", args.quick) logger.info(" Update: %s", args.update) testsuite.log_arguments(logger, args) logutil.log_arguments(logger, args) skip.log_arguments(logger, args) ignore.log_arguments(logger, args) return 0 # Try to find a baseline. If present, pre-load it. baseline = None if args.baseline: # An explict baseline testsuite, can be more forgiving in how # it is loaded. baseline = testsuite.load(logger, logutil.DEBUG, args, testsuite_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: # Perhaps the baseline just contains output, magic up the # corresponding testsuite directory. 
baseline_directory = os.path.join(args.testing_directory, "pluto") baseline = testsuite.load(logger, logutil.DEBUG, args, testsuite_directory=baseline_directory, testsuite_output_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: logger.info("'%s' is not a baseline", args.baseline) return 1 elif len(args.directories) > 1: # If there is more than one directory then, perhaps, the last # one is a baseline. A baseline might be: a complete # testsuite snapshot; or just output saved as # testing/pluto/OUTPUT/TESTDIR. baseline = testsuite.load(logger, logutil.DEBUG, args, testsuite_directory=args.directories[-1]) if baseline: # discard the last argument as consumed above. logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1]) args.directories.pop() tests = testsuite.load_testsuite_or_tests(logger, args.directories, args) # And check if not tests: logger.error("Invalid testsuite or test directories") return 1 result_stats = stats.Results() try: results(logger, tests, baseline, args, result_stats) finally: if args.stats is Stats.details: result_stats.log_details(stderr_log, header="Details:", prefix=" ") if args.stats in [Stats.details, Stats.summary]: result_stats.log_summary(stderr_log, header="Summary:", prefix=" ") return 0
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix)
            return

        # Be lazy when gathering the results, don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.
        old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                 quick=True, finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.
        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent
            # directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.
        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory, "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                  [test_domain.domain for test_domain in test_domains.values()],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(test.output_directory,
                                                  test_domain.domain.host_name + ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error("**** timed out while running script %s ****", script)

                    finally:

                        # Close the redirected test-result log files
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            logfile.close()

                        # Always disconnect from the test domains.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

        # The test finished.  Aborts such as a failed boot, or a
        # timeout, will skip all the below.

        result = post.mortem(test, args, domain_prefix=domain_prefix,
                             finished=True, update=True)
        logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                    result.issues and " ", result.issues, suffix)

        # Since the test finished (resolved in POSIX terminology),
        # emit enough JSON to fool scripts like pluto-testlist-scan.sh.
        #
        # A test that timed-out or crashed, isn't considered resolved.
        #
        # A more robust way of doing this would be to mark each of the
        # console logs as complete as it is closed.
        #
        # More detailed information can be extracted from the debug.log.

        RESULT = {
            jsonutil.result.testname: test.name,
            jsonutil.result.expect: test.status,
            jsonutil.result.result: result,
            jsonutil.result.issues: result.issues,
            jsonutil.result.hosts: test.host_names,
            jsonutil.result.time: jsonutil.ftime(test_total_time.start),
            jsonutil.result.runtime: round(test_runtime.seconds(), 1),
            jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
            jsonutil.result.script_time: round(test_script_time.seconds(), 1),
            jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        }
        j = jsonutil.dumps(RESULT)
        logger.info("filling '%s' with json: %s", test.result_file(), j)
        with open(test.result_file(), "w") as f:
            f.write(j)
            f.write("\n")

        test_stats.add(test, "tests", str(result))
        result_stats.add_result(result, old_result)
        test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ")
        result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
def __init__(self, test_directory, testing_directory,
             saved_test_output_directory=None,
             testsuite_output_directory=None,
             kind="kvmplutotest", status="good"):

    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.status = status

    # The test's name is always identical to the test directory's
    # name (aka basename).  However, since TEST_DIRECTORY could be
    # relative (for instance "." or "./..") it first needs to be
    # made absolute before the basename can be extracted.
    test_directory = os.path.realpath(test_directory)

    # The test's name is the same as the directory's basename.
    self.name = os.path.basename(test_directory)
    self.full_name = "test " + self.name

    # Construct the test's relative directory path such that it
    # always contains the test directory name (i.e., the test
    # name) as context.  For instance: "." gets rewritten as
    # ../<test>; and ".." gets rewritten as "../../<test>".  This
    # ensures that displayed paths always include some context.
    # For instance, given "kvmresult.py .", "../<test> passed"
    # (and not ". passed") will be displayed.
    self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name)

    # Directory where the next test run's output should be
    # written.  If a common testsuite output directory was
    # specified, use that.
    if testsuite_output_directory:
        self.output_directory = os.path.join(testsuite_output_directory, self.name)
    else:
        self.output_directory = os.path.join(self.directory, "OUTPUT")

    # Directory containing saved output from a previous test run.
    # If the test's output directory was explicitly specified, say
    # as a parameter to kvmrunner.py viz:
    #
    #   kvmresults.py testing/pluto/<test>/OUTPUT.OLD
    #   kvmresults.py testing/pluto/OUTPUT/<test>
    #
    # then that directory, and not the next output-directory, will
    # be passed in and saved here.  Otherwise it is None, and the
    # OUTPUT_DIRECTORY should be used.
    if saved_test_output_directory:
        self.saved_output_directory = saved_test_output_directory
    else:
        self.saved_output_directory = None

    # The testing_directory to use when performing post.mortem
    # tasks such as running the sanitizer.
    #
    # Since test.directory may be incomplete (the sanitizers
    # directory may be missing), use the testing directory
    # belonging to this script.
    if testing_directory:
        # trust it
        self._testing_directory = os.path.relpath(testing_directory)
    else:
        self._testing_directory = utilsdir.relpath("..")

    # Get an ordered list of (host,script) pairs of all the
    # scripts that need to be run.
    self.host_script_tuples = scripts.host_script_tuples(self.directory)

    # Just assume any host mentioned in scripts needs to run.
    host_names = set()
    for host, script in self.host_script_tuples:
        host_names.add(host)
    self.host_names = sorted(host_names)
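# The path juggling above in miniature: realpath()+basename() pin down
# the test name, and joining relpath(dirname) back with that name
# guarantees the displayed path always ends in the test directory's
# name.  Runnable from inside any directory:

import os

for test_directory in (".", ".."):
    absolute = os.path.realpath(test_directory)
    name = os.path.basename(absolute)
    directory = os.path.join(os.path.relpath(os.path.dirname(absolute)), name)
    print("%r -> %r" % (test_directory, directory))
# from inside <test>/: "." -> "../<test>" and ".." -> "../../<test>"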
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(description="Run tests",
                                     epilog="SIGUSR1 will dump all thread stacks")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info(" directories: %s", args.directories)
    logger.info(" verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix=" ")
    result_stats.log_details(logger.info,
                             header="final test details:", prefix=" ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix=" ")
    result_stats.log_summary(logger.info, header="final test results:", prefix=" ")

    stop_time = datetime.now()
    logger.info("run finished at %s after %s", stop_time, stop_time - timing.START_TIME)

    return exit_code
def __init__(self, testsuite, error_level):
    self.error_level = error_level
    self.logger = logutil.getLogger(__name__)
    self.testsuite = testsuite
    self.test_list = open(testsuite.testlist, 'r')
def main(): parser = argparse.ArgumentParser(description="Connect to and run a shell command on a virtual machine domain", epilog=("If no command or file is specified an interactive shell is created.")) parser.add_argument("--timeout", type=argutil.timeout, default=None, help=("maximum runtime for the command" "; -1 for no timeout" " (default: no timeout)")) argutil.add_redirect_argument(parser, "re-direct console output from stdout to %(metavar)s", "--output", "-o", default=sys.stdout, metavar="FILE") parser.add_argument("--chdir", default=None, action="store", metavar="PATH", help=("first change directory to %(metavar)s on the remote" " domain and update prompt-match logic to expect" " that directory" "; an absolute %(metavar)s is used unmodified" "; a relative %(metavar)s, which is interpreted" " as relative to the current local working directory" ", is converted to an absolute remote path before use" " (default: leave directory unchanged)")) parser.add_argument("--boot", default=None, action="store", type=Boot, choices=[e for e in Boot], help=("force the domain to boot" "; 'cold': power-off any existing domain" "; 'warm': reboot any existing domain" " (default: leave existing domain running)")) parser.add_argument("--shutdown", default=False, action="store_true", help=("on-completion shut down the domain" " (default: leave the domain running)")) parser.add_argument("--mode", default=None, choices=set(["interactive", "batch"]), help=("enter mode" " (default: if there is no command enter interactive mode)")) parser.add_argument("--host-name", default=None, help="The virtual machine's host name") parser.add_argument("domain", action="store", metavar="DOMAIN", help="virtual machine (domain) to connect to") parser.add_argument("command", nargs=argparse.REMAINDER, metavar="COMMAND", help="run shell command non-interactively; WARNING#1: this simply concatenates remaining arguments with spaces; WARNING#2: this does not try to escape arguments before passing them onto the domain's shell") logutil.add_arguments(parser) # These three calls go together args = parser.parse_args() logutil.config(args, sys.stderr) logger = logutil.getLogger("kvmsh") # Get things started domain = virsh.Domain(domain_name=args.domain, host_name=args.host_name) # Find a reason to log-in and interact with the console. batch = args.mode == "batch" or args.command interactive = args.mode == "interactive" or (not args.command and args.boot == None and not args.shutdown) # Get the current console, this will be None if the machine is # shutoff. 
console = domain.console() if args.boot: if args.boot is Boot.cold and console: remote.shutdown(domain, console) console = None console = remote.boot_to_login_prompt(domain, console) elif (interactive or batch) and not console: console = remote.boot_to_login_prompt(domain, console) status = 0 if interactive or batch: remote.login(domain, console) if args.chdir and os.path.isabs(args.chdir): chdir = args.chdir elif args.chdir: chdir = remote.directory(domain, console, directory=os.path.realpath(args.chdir)) else: chdir = None if chdir: domain.logger.info("'cd' to %s", chdir) console.chdir(chdir) if args.command: console.output(args.output) console.run("") status = console.run(' '.join(args.command), timeout=args.timeout) print() if interactive: print() output = console.output(None) if output: logger.info("info: option --output disabled as it makes pexpect crash when in interactive mode.") if args.debug: logger.info("info: pexpect ignores --debug in interactive mode!") logger.info("Escape character is ^]") # Hack so that the prompt appears console.output(sys.stdout) console.run("") console.output() # Get this terminals properties. columns, rows = os.get_terminal_size() # Normal mode console.stty_sane(term=os.getenv("TERM"), rows=rows, columns=columns) console.interact() if args.shutdown: shutdown_status = remote.shutdown(domain) status = status or shutdown_status sys.exit(status)
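# Rough shape of what a console.run() built on pexpect has to do: send
# the command, wait for the shell prompt, then ask the shell for the
# exit status of the last command.  PROMPT is a placeholder; the real
# console object tracks the expected prompt (including --chdir
# updates), so treat this as a sketch only.

import pexpect

PROMPT = r"\[root@[-\w]+ [^]]*\]# "  # assumed prompt pattern

def run(child, command, timeout=30):
    child.sendline(command)
    child.expect(PROMPT, timeout=timeout)
    # the exit status is only available by asking the remote shell
    child.sendline("echo $?")
    child.expect(r"(\d+)\r?\n", timeout=timeout)
    status = int(child.match.group(1))
    child.expect(PROMPT, timeout=timeout)
    return status

# usage (assumed): child = pexpect.spawn("virsh console <domain>")
#                  status = run(child, "uptime")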
def main(): parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument("--print-directory", action="store_true") parser.add_argument("--print-name", action="store_true") parser.add_argument("--print-result", action="store_true") parser.add_argument("--list-ignored", action="store_true", help="include ignored tests in the list") parser.add_argument("--list-untested", action="store_true", help="include untested tests in the list") parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+", help=("Either a testsuite (only one) or test directory")) # Note: this argument serves as documentation only. The # TEST-DIRECTORY argument always consume all remaining parameters. parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?", help=("An optional testsuite directory containing" " results from a previous test run")) testsuite.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmresults") # The option -vvvvvvv is a short circuit for these; make # re-ordering easy by using V as a counter. v = 0 args.print_directory = args.print_directory or args.verbose > v args.print_name = args.print_name or args.verbose > v v += 1 args.list_untested = args.list_untested or args.verbose > v ; v += 1 args.list_ignored = args.list_ignored or args.verbose > v ; v += 1 # By default print the relative directory path. if not args.print_directory and not args.print_name: args.print_directory = True # If there is more than one directory then the last might be the # baseline. Try loading it as a testsuite (baselines are # testsuites) to see if that is the case. basetests = None tests = None if len(args.directories) > 1: # Perhaps the last argument is the baseline? basetests = testsuite.load(logger, args.directories[-1]) if basetests: logger.debug("basetests loaded from '%s'", basetests.directory) args.directories.pop() tests = testsuite.load_testsuite_or_tests(logger, args.directories) logger.debug("basetests=%s", basetests) logger.debug("tests=%s", tests) # And check if not tests: logger.error("Invalid testsuite or test directories") return 1 # When an explicit list of directories was specified always print # all of them (otherwise, tests seem to get lost). if isinstance(tests, list): args.list_untested = True # Preload the baseline. This avoids re-scanning the TESTLIST and, # when errors, printing those repeatedly. Also, passing the full # baseline to Test.results() lets that function differentiate # between a baseline missing results or being entirely absent. baseline = None if basetests: baseline = {} for test in basetests: baseline[test.name] = test for test in tests: # Filter out tests that are being ignored? ignore = testsuite.ignore(test, args) if ignore and not args.list_ignored: continue # Filter out tests that have not been run? result = None if not ignore: result = test.result(baseline) if not result and not args.list_untested: continue sep = "" if args.print_name: print(sep, end="") print(test.name, end="") sep = " " if args.print_directory: print(sep, end="") print(test.directory, end="") sep = " " if ignore: print(sep, end="") print("ignored", ignore, end="") sep = " " if result: print(sep, end="") print(result, end="") sep = " " print() sys.stdout.flush() return 0
def mortem(test, args, domain_prefix="", baseline=None, output_directory=None, quick=False): logger = logutil.getLogger(domain_prefix, __name__, test.name) test_result = TestResult(logger, test, quick, output_directory=output_directory) if not test_result: return test_result if not baseline: return test_result # For "baseline", the general idea is that "kvmresults.py | grep # baseline" should print something when either a regression or # progression has occurred. For instance: # # - a test passing but the baseline failing # # - a test failing, but the baseline passing # # - a test failing, and the baseline failling in a different way # # What isn't interesting is a test and the baseline failing the # same way. if not test.name in baseline: test_result.issues.add(Issues.ABSENT, "baseline") return test_result # When loading the baseline results use "quick" so that the # original results are used. This seems to be the best of a bad # bunch. # # Since that the baseline was generated using an old sanitizer and # reference output, using the latest sanitizer scripts (in # testing/) can, confusingly, lead to baselines results being # identified as failures failing yet the diffs show a pass. # # OTOH, when this goes to compare the results against the # baseline, first putting them through the latest sanitizer tends # to result in better diffs. base = baseline[test.name] baseline_result = TestResult(logger, base, quick=True) if not baseline_result.resolution in [test_result.resolution.PASSED, test_result.resolution.FAILED]: test_result.issues.add(str(baseline_result), "baseline") return test_result if test_result.resolution in [test_result.resolution.PASSED] \ and baseline_result.resolution in [baseline_result.resolution.PASSED]: return test_result for host_name in test.host_names: # result missing output; still check baseline .. if host_name not in test_result.sanitized_output: if host_name in baseline_result.sanitized_output: if host_name in baseline_result.diffs: test_result.issues.add(Issues.BASELINE_FAILED, host_name) else: test_result.issues.add(Issues.BASELINE_PASSED, host_name) continue if not host_name in baseline_result.sanitized_output: test_result.issues.add(Issues.BASELINE_MISSING, host_name) continue if not host_name in test_result.diffs: if host_name in baseline_result.diffs: test_result.issues.add(Issues.BASELINE_FAILED, host_name) continue if not host_name in baseline_result.diffs: test_result.issues.add(Issues.BASELINE_PASSED, host_name) continue baseline_diff = _diff(logger, "BASELINE/" + test.directory + "/" + host_name + ".console.txt", baseline_result.sanitized_output[host_name], "OUTPUT/" + test.directory + "/" + host_name + ".console.txt", test_result.sanitized_output[host_name]) if baseline_diff: baseline_whitespace = _whitespace(baseline_result.sanitized_output[host_name], test_result.sanitized_output[host_name]) if baseline_whitespace: test_result.issues.add(Issues.BASELINE_WHITESPACE, host_name) else: test_result.issues.add(Issues.BASELINE_DIFFERENT, host_name) # update the diff to something hopefully closer? # test_result.diffs[host_name] = baseline_diff # else: # test_result.issues.add("baseline-failed", host_name) return test_result
def __init__(self, test_directory, testing_directory,
             saved_test_output_directory=None,
             saved_testsuite_output_directory=None,
             testsuite_output_directory=None,
             kind="kvmplutotest", expected_result="good"):

    self.logger = logutil.getLogger(__name__)
    # basics
    self.kind = kind
    self.expected_result = expected_result

    # The test's name is always identical to the test directory's
    # name (aka basename).  However, since TEST_DIRECTORY could be
    # relative (for instance "." or "./..") it first needs to be
    # made absolute before the basename can be extracted.
    test_directory = os.path.realpath(test_directory)

    # The test's name is the same as the directory's basename.
    self.name = os.path.basename(test_directory)
    self.full_name = "test " + self.name

    # Construct the test's relative directory path such that it
    # always contains the test directory name (i.e., the test
    # name) as context.  For instance: "." gets rewritten as
    # ../<test>; and ".." gets rewritten as "../../<test>".  This
    # ensures that displayed paths always include some context.
    # For instance, given "kvmresult.py .", "../<test> passed"
    # (and not ". passed") will be displayed.
    self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name)

    # Directory where the next test run's output should be
    # written.  If a common testsuite output directory was
    # specified, use that.
    if testsuite_output_directory:
        self.output_directory = os.path.join(testsuite_output_directory, self.name)
    else:
        self.output_directory = os.path.join(self.directory, "OUTPUT")

    # Directory containing saved output from a previous test run.
    # If the test's output directory was explicitly specified, say
    # as a parameter to kvmrunner.py viz:
    #
    #   kvmresults.py testing/pluto/<test>/OUTPUT.OLD
    #   kvmresults.py testing/pluto/OUTPUT/<test>
    #
    # then that directory, and not the next output-directory, will
    # be passed in and saved here.  Otherwise it is None, and the
    # OUTPUT_DIRECTORY should be used.
    if saved_test_output_directory:
        self.saved_output_directory = saved_test_output_directory
    elif saved_testsuite_output_directory:
        self.saved_output_directory = os.path.join(saved_testsuite_output_directory, self.name)
    else:
        self.saved_output_directory = None

    # An instance of the test directory within a tree that
    # includes all the post-mortem sanitization scripts.  If the
    # test results have been copied then this will be different to
    # test.directory.
    self.sanitize_directory = os.path.realpath(os.path.join(testing_directory, "pluto", self.name))

    scripts = _scripts(self.directory)

    # host_names that this test requires; determine it from the
    # script names.
    self.host_names = set()
    for host_name in HOST_NAMES:
        for script in scripts:
            if re.search(host_name, script):
                self.host_names.add(host_name)
                break

    # figure out the scripts that need running
    self.scripts = []

    # init scripts
    _add_matching(self.scripts, scripts, ["nic"], "nicinit.sh")
    _add_matching(self.scripts, scripts, ["east"], "eastinit.sh")
    for host_name in sorted(self.host_names):
        _add_matching(self.scripts, scripts, [host_name], host_name + "init.sh")

    # run scripts
    for host_name in sorted(self.host_names):
        _add_matching(self.scripts, scripts, [host_name], host_name + "run.sh")

    # strip out the final script
    final = []
    _add_matching(final, scripts, sorted(self.host_names), "final.sh")

    # what's left is the middle scripts; not exactly a smart way
    # to do this
    for script in sorted(scripts):
        for host_name in sorted(self.host_names):
            if re.search(host_name, script):
                self.scripts.append(Script(host_name, script))

    # append the final scripts
    for script in final:
        self.scripts.append(script)
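# The host-name inference above, in isolation: a host participates in
# a test when its name appears anywhere in one of the script file
# names.  HOST_NAMES below is a stand-in for the module-level constant
# the real code uses.

import re

HOST_NAMES = ("east", "west", "road", "north", "nic")
scripts = ["eastinit.sh", "westinit.sh", "02-east-ping.sh", "final.sh"]

host_names = set()
for host_name in HOST_NAMES:
    if any(re.search(host_name, script) for script in scripts):
        host_names.add(host_name)
print(sorted(host_names))  # ['east', 'west']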
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(
        description="list test results",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  SIGUSR1 will dump all thread stacks.")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--exit-ok", action="store_true",
                        help=("return a zero exit status; normally, when there are failures, a non-zero exit status is returned"))

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print", action="store",
                        default=printer.Print(printer.Print.PATH, printer.Print.RESULT, printer.Print.ISSUES),
                        type=printer.Print, metavar=str(printer.Print),
                        help="comma separated list of attributes to print for each test; default: '%(default)s'")

    parser.add_argument("--stats", action="store", default=Stats.summary,
                        type=Stats, choices=[c for c in Stats],
                        help="provide overview statistics; default: \"%(default)s\"")

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results is displayed"
    parser.add_argument("--baseline", "-b", metavar=baseline_metavar, help=baseline_help)

    parser.add_argument("--json", action="store_true",
                        help="output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well formed json list)")

    parser.add_argument("directories", metavar="DIRECTORY-OR-FILE", nargs="+",
                        help="a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'")
    # Note: this argument serves as documentation only.  The
    # DIRECTORY-OR-FILE argument consumes all remaining parameters.
    parser.add_argument("baseline_ignored", nargs="?",
                        metavar=baseline_metavar, help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    # XXX: while checking for an UNTESTED test should be very cheap
    # (does OUTPUT/ exist?) it isn't.  Currently it triggers a full
    # post-mortem analysis.
    skip.add_arguments(parser, skip.Skip.UNTESTED)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    if args.dump_args:
        logger.info("Arguments:")
        logger.info(" Stats: %s", args.stats)
        logger.info(" Print: %s", args.print)
        logger.info(" Baseline: %s", args.baseline)
        logger.info(" Json: %s", args.json)
        logger.info(" Quick: %s", args.quick)
        logger.info(" Update: %s", args.update)
        logger.info(" Exit OK: %s", args.exit_ok)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        publish.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite; be more forgiving in how it
        # is loaded.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline and os.path.isdir(args.baseline):
            # Perhaps, AKA BACKUP/YYYY-MM-DDD-..., the baseline
            # directory only contains a copy of the output.  Magic up
            # a baseline by combining the output with the tests in
            # ARGS.TESTING_DIRECTORY.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger, logutil.DEBUG, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    exit_code = 125  # assume a 'git bisect' barf
    try:
        exit_code = results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log, header="Details:", prefix=" ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix=" ")
        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    return exit_code
def main(): parser = argparse.ArgumentParser(description="Run tests") # This argument's behaviour is overloaded; the shorter word "try" # is a python word. parser.add_argument("--retry", type=int, metavar="COUNT", default=1, help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)") parser.add_argument("--attempts", type=int, default=1, help="number of times to attempt a test before giving up; default %(default)s") parser.add_argument("--dry-run", "-n", action="store_true") parser.add_argument("--verbose", "-v", action="count", default=0) # Default to BACKUP under the current directory. Name is # arbitrary, chosen for its hopefully unique first letter # (avoiding Makefile, OBJ, README, ... :-). parser.add_argument("--backup-directory", metavar="DIRECTORY", default="BACKUP", help="backup existing <test>/OUTPUT to %(metavar)s/<date>/<test> (default: %(default)s)") parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="either a testsuite directory or a list of test directories") testsuite.add_arguments(parser) runner.add_arguments(parser) post.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmrunner") logger.info("Options:") logger.info(" retry: %s", args.retry) logger.info(" attempts: %s", args.attempts) logger.info(" dry-run: %s", args.dry_run) logger.info(" backup-directory: %s", args.backup_directory) logger.info(" directories: %s", args.directories) testsuite.log_arguments(logger, args) runner.log_arguments(logger, args) post.log_arguments(logger, args) logutil.log_arguments(logger, args) tests = testsuite.load_testsuite_or_tests(logger, args.directories, args, log_level=logutil.INFO) if not tests: logger.error("test or testsuite directory invalid: %s", args.directories) return 1 test_stats = stats.Tests() result_stats = stats.Results() start_time = datetime.now() exit_code = 0 try: logger.info("run started at %s", start_time) test_count = 0 for test in tests: test_stats.add(test, "total") test_count += 1 # Would the number of tests to be [re]run be better? test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests)) ignore, details = testsuite.ignore(test, args) if ignore: result_stats.add_ignored(test, ignore) test_stats.add(test, "ignored") # No need to log all the ignored tests when an # explicit sub-set of tests is being run. For # instance, when running just one test. if not args.test_name: logger.info("%s: ignore (%s)", test_prefix, details) continue # Implement "--retry" as described above: if retry is -ve, # the test is always run; if there's no result, the test # is always run; skip passed tests; else things get a # little wierd. # Be lazy with gathering the results, don't run the # sanitizer or diff. old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True) if args.retry >= 0: if old_result: if old_result.passed: logger.info("%s: passed", test_prefix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) continue if args.retry == 0: logger.info("%s: %s (delete '%s' to re-test)", test_prefix, result, test.output_directory) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) continue test_stats.add(test, "retry") logger.info("%s: starting ...", test_prefix) test_stats.add(test, "tests") # Move the contents of the existing OUTPUT directory to # BACKUP_DIRECTORY. 
Do it file-by-file so that, at no # point, the directory is empty. # # By moving each test just before it is started a trail of # what tests were attempted at each run is left. # # XXX: During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a # test is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the # time taken to run a test) keeps increasing. # # Always moving the directory contents to the # BACKUP_DIRECTORY mitigates this some. saved_output_directory = None if os.path.exists(test.output_directory): saved_output_directory = os.path.join(args.backup_directory, start_time.strftime("%Y%m%d%H%M%S"), test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, saved_output_directory) # Copy "empty" OUTPUT directories too. args.dry_run or os.makedirs(saved_output_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(saved_output_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) debugfile = None result = None # At least one iteration; above will have filtered out # skips and ignores for attempt in range(args.attempts): test_stats.add(test, "attempts") # Create the OUTPUT directory. try: if not args.dry_run: os.mkdir(test.output_directory) elif os.exists(test.output_directory): raise FileExistsError() except FileExistsError: # On first attempt, the OUTPUT directory will # be empty (see above) so no need to save. if attempt > 0: saved_output_directory = os.path.join(test.output_directory, str(attempt)) logger.info("moving contents of '%s' to '%s'", test.output_directory, saved_output_directory) args.dry_run or os.makedirs(saved_output_directory, exist_ok=True) for name in os.listdir(test.output_directory): if os.path.isfile(src): src = os.path.join(test.output_directory, name) dst = os.path.join(saved_output_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) # Start a debug log in the OUTPUT directory; include # timing for this specific test attempt. with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")): logger.info("****** test %s attempt %d of %d started at %s ******", test.name, attempt+1, args.attempts, datetime.now()) if saved_output_directory: logger.info("contents of '%s' moved to '%s'", test.output_directory, saved_output_directory) saved_output_directory = None ending = "undefined" try: if not args.dry_run: runner.run_test(test, args) ending = "finished" result = post.mortem(test, args, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh. logger.info("storing result in '%s'", test.result_file) with open(test.result_file, "w") as f: f.write('"result": "%s"\n' % result) except pexpect.TIMEOUT as e: logger.exception("**** test %s timed out ****", test.name) ending = "timed-out" # If the test has no output to check against, this will "pass" result = post.mortem(test, args, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to # here should have a non-null RESULT. test_stats.add(test, "attempts", ending, str(result)) if result.errors: logger.info("****** test %s %s %s ******", test.name, result, result.errors) else: logger.info("****** test %s %s ******", test.name, result) if result.passed: break # Above will have set RESULT. 
During a control-c or crash # the below will not be executed. test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ") except KeyboardInterrupt: logger.exception("**** test %s interrupted ****", test.name) exit_code = 1 test_stats.log_details(args.verbose and logger.info or logger.debug, header="final stat details:", prefix=" ") result_stats.log_details(logger.info, header="final test details:", prefix=" ") test_stats.log_summary(logger.info, header="final test stats:", prefix=" ") result_stats.log_summary(logger.info, header="final test results:", prefix=" ") end_time = datetime.now() logger.info("run finished at %s after %s", end_time, end_time - start_time) return exit_code
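# The OUTPUT -> BACKUP pattern used by both runners, reduced to its
# core: create the destination, then os.replace() entry-by-entry so
# the source directory exists (possibly empty) at every instant, which
# is what marks the test as "started".

import os

def move_contents(src_dir, dst_dir):
    os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        # os.replace() is atomic when source and destination are on
        # the same filesystem
        os.replace(os.path.join(src_dir, name), os.path.join(dst_dir, name))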
def main(): parser = argparse.ArgumentParser( description= "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]", epilog= "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately). If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified." ) parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument( "--quick", action="store_true", help= ("Use the previously generated '.console.txt' and '.console.diff' files" )) parser.add_argument( "--quick-sanitize", action="store_true", help=("Use the previously generated '.console.txt' file")) parser.add_argument( "--quick-diff", action="store_true", help=("Use the previously generated '.console.diff' file")) parser.add_argument( "--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files")) parser.add_argument("--update-sanitize", action="store_true", help=("Update the '.console.txt' file")) parser.add_argument("--update-diff", action="store_true", help=("Update the '.console.diff' file")) parser.add_argument("--dump-args", action="store_true") parser.add_argument("--prefix", action="store", type=Prefix, choices=[p for p in Prefix], help="prefix to display with each test") # how to parse --print directory,saved-directory,...? parser.add_argument("--print", action="append", default=[], choices=[p for p in Print], type=Print, help="what information to display about each test") parser.add_argument( "--stats", action="store", default=Stats.summary, type=Stats, choices=[c for c in Stats], help="provide overview statistics; default: \"%(default)s\"") parser.add_argument( "--baseline", metavar="DIRECTORY", help="a %(metavar)s containing baseline testsuite output") parser.add_argument( "directories", metavar="DIRECTORY", nargs="+", help= "%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output" ) # Note: this argument serves as documentation only. The # TEST-DIRECTORY argument always consumes all remaining arguments. parser.add_argument( "baseline", metavar="BASELINE-DIRECTORY", nargs="?", help= "an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run" ) post.add_arguments(parser) testsuite.add_arguments(parser) logutil.add_arguments(parser) skip.add_arguments(parser) ignore.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmresults") # default to printing results if not args.print: args.print = [Print.result] # The option -vvvvvvv is a short circuit for these; make # re-ordering easy by using V as a counter. v = 0 if args.dump_args: logger.info("Arguments:") logger.info(" Stats: %s", args.stats) logger.info(" Print: %s", args.print) logger.info(" Prefix: %s", args.prefix) post.log_arguments(logger, args) testsuite.log_arguments(logger, args) logutil.log_arguments(logger, args) skip.log_arguments(logger, args) ignore.log_arguments(logger, args) return 0 # Try to find a baseline. If present, pre-load it. baseline = None if args.baseline: # An explict baseline testsuite, can be more forgiving in how # it is loaded. 
baseline = testsuite.load(logger, args, testsuite_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: # Perhaps the baseline just contains output, magic up the # corresponding testsuite directory. baseline_directory = os.path.join(args.testing_directory, "pluto") baseline = testsuite.load( logger, args, testsuite_directory=baseline_directory, saved_testsuite_output_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: logger.info("'%s' is not a baseline", args.baseline) return 1 elif len(args.directories) > 1: # If there is more than one directory then, perhaps, the last # one is a baseline. A baseline might be: a complete # testsuite snapshot; or just output saved as # testing/pluto/OUTPUT/TESTDIR. baseline = testsuite.load(logger, logutil.DEBUG, args, testsuite_directory=args.directories[-1]) if baseline: # discard the last argument as consumed above. logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1]) args.directories.pop() tests = testsuite.load_testsuite_or_tests(logger, args.directories, args) # And check if not tests: logger.error("Invalid testsuite or test directories") return 1 result_stats = stats.Results() try: results(logger, tests, baseline, args, result_stats) finally: if args.stats is Stats.details: result_stats.log_details(stderr_log, header="Details:", prefix=" ") if args.stats in [Stats.details, Stats.summary]: result_stats.log_summary(stderr_log, header="Summary:", prefix=" ") return 0
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(description="Run tests",
                                     epilog="SIGUSR1 will dump all thread stacks")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--pid-file", default="",
                        help="file to store process id of KVMRUNNER")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info(" directories: %s", args.directories)
    logger.info(" verbose: %s", args.verbose)
    logger.info(" pid-file: %s", args.pid_file)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    if args.pid_file:
        pid = os.getpid()
        logger.info("writing pid %d to '%s'", pid, args.pid_file)
        with open(args.pid_file, "wt") as pidfile:
            pidfile.write("%d\n" % pid)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    if len(tests) == 1 and args.run_post_mortem is None:
        logger.warning("skipping post-mortem.sh as only one test; use --run-post-mortem true to override this")
        args.run_post_mortem = False

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix=" ")
    result_stats.log_details(logger.info,
                             header="final test details:", prefix=" ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix=" ")
    result_stats.log_summary(logger.info, header="final test results:", prefix=" ")

    stop_time = datetime.now()
    logger.info("run finished at %s after %s", stop_time, stop_time - timing.START_TIME)

    return exit_code
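# What faulthandler.register(signal.SIGUSR1) at the top of main()
# buys: "kill -USR1 <pid>" (the pid saved via --pid-file) makes the
# process dump every thread's Python stack to stderr without killing
# it.  A minimal POSIX-only demo:

import faulthandler
import os
import signal

faulthandler.register(signal.SIGUSR1)
print("kill -USR1 %d to dump all thread stacks" % os.getpid())
signal.pause()  # block; send SIGUSR1 from another terminal to test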
def main(): parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]", epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately). If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument("--quick", action="store_true", help=("Use the previously generated '.console.txt' and '.console.diff' files")) parser.add_argument("--quick-sanitize", action="store_true", help=("Use the previously generated '.console.txt' file")) parser.add_argument("--quick-diff", action="store_true", help=("Use the previously generated '.console.diff' file")) parser.add_argument("--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files")) parser.add_argument("--update-sanitize", action="store_true", help=("Update the '.console.txt' file")) parser.add_argument("--update-diff", action="store_true", help=("Update the '.console.diff' file")) parser.add_argument("--dump-args", action="store_true") parser.add_argument("--prefix", action="store", type=Prefix, choices=[p for p in Prefix], help="prefix to display with each test") # how to parse --print directory,saved-directory,...? parser.add_argument("--print", action="append", default=[], choices=[p for p in Print], type=Print, help="what information to display about each test") parser.add_argument("--stats", action="store", default=Stats.summary, type=Stats, choices=[c for c in Stats], help="provide overview statistics; default: \"%(default)s\""); parser.add_argument("--baseline", metavar="DIRECTORY", help="a %(metavar)s containing baseline testsuite output") parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output") # Note: this argument serves as documentation only. The # TEST-DIRECTORY argument always consumes all remaining arguments. parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?", help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run") post.add_arguments(parser) testsuite.add_arguments(parser) logutil.add_arguments(parser) skip.add_arguments(parser) ignore.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmresults") # default to printing results if not args.print: args.print = [Print.result] # The option -vvvvvvv is a short circuit for these; make # re-ordering easy by using V as a counter. v = 0 if args.dump_args: logger.info("Arguments:") logger.info(" Stats: %s", args.stats) logger.info(" Print: %s", args.print) logger.info(" Prefix: %s", args.prefix) post.log_arguments(logger, args) testsuite.log_arguments(logger, args) logutil.log_arguments(logger, args) skip.log_arguments(logger, args) ignore.log_arguments(logger, args) return 0 # Try to find a baseline. If present, pre-load it. baseline = None if args.baseline: # An explict baseline testsuite, can be more forgiving in how # it is loaded. 
baseline = testsuite.load(logger, args, testsuite_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: # Perhaps the baseline just contains output; magic up the # corresponding testsuite directory. baseline_directory = os.path.join(args.testing_directory, "pluto") baseline = testsuite.load(logger, args, testsuite_directory=baseline_directory, saved_testsuite_output_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: logger.info("'%s' is not a baseline", args.baseline) return 1 elif len(args.directories) > 1: # If there is more than one directory then, perhaps, the last # one is a baseline. A baseline might be: a complete # testsuite snapshot; or just output saved as # testing/pluto/OUTPUT/TESTDIR. baseline = testsuite.load(logger, args, testsuite_directory=args.directories[-1], error_level=logutil.DEBUG) if baseline: # discard the last argument as consumed above. logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1]) args.directories.pop() tests = testsuite.load_testsuite_or_tests(logger, args.directories, args) # And check if not tests: logger.error("Invalid testsuite or test directories") return 1 result_stats = stats.Results() try: results(logger, tests, baseline, args, result_stats) finally: if args.stats is Stats.details: result_stats.log_details(stderr_log, header="Details:", prefix=" ") if args.stats in [Stats.details, Stats.summary]: result_stats.log_summary(stderr_log, header="Summary:", prefix=" ") return 0
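# --- illustrative sketch (not part of the suite) ---------------------
# The trailing-baseline heuristic above, reduced to its core: if the
# last DIRECTORY argument loads as a testsuite, treat it as the
# baseline and drop it from the test list. try_load() is a
# hypothetical stand-in for the testsuite.load() variants.
def try_load(directory):
    return None  # hypothetical; real code consults testsuite.load()
def split_baseline(directories):
    baseline = None
    if len(directories) > 1:
        baseline = try_load(directories[-1])
        if baseline:
            directories = directories[:-1]  # consumed as the baseline
    return directories, baseline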
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_total_time = None # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) with logger.time("processing test %s", test_prefix) as test_total_time: ignored, details = ignore.test(logger, args, test) if ignored: result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Be lazy when gathering the results, don't run the sanitizer # or diff. Let post.mortem figure out if the test finished. old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=True, finished=None) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) return if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create just the OUTPUT/ directory; if it already exists, # move any contents to BACKUP/. Do it file-by-file so that, # at no point, the OUTPUT/ directory is missing (having an # OUTPUT/ directory implies the test was started). # # Don't create the path. If the parent directory is missing, # this will fail. # # By backing up each test just before it is started, a trail # of what tests were attempted during each run is created. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a test # is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the time # taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is not # under testing/pluto/ this problem is avoided. try: os.mkdir(test.output_directory) except FileNotFoundError: # Bail, something is messed up (for instance the parent directory doesn't exist). return except FileExistsError: backup_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, copy it. os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug log # writing to that directory; include timing for this test run. with logger.debug_time("testing %s", test_prefix, logfile=os.path.join(test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: logger.exception("timeout while booting domains") # Bail before RESULT is written - being unable to # boot the domains is a disaster. The test is # UNRESOLVED. 
return # Run the scripts directly with logger.time("running scripts %s", " ".join(("%s:%s" % (host, script)) for host, script in test.host_script_tuples)) as test_script_time: with tcpdump.Dump(logger, domain_prefix, test.output_directory, [test_domain.domain for test_domain in test_domains.values()], enable=args.tcpdump): try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join(test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.output(open(output, "w")) try: for host, script in test.host_script_tuples: if args.stop_at == script: logger.error("stopping test run at (before executing) script %s", script) break test_domain = test_domains[host] test_domain.read_file_run(script) except pexpect.TIMEOUT as e: # A test ending with a timeout is still a # finished test. Analysis of the results # will detect this and flag it as a fail. logger.error("**** timed out while running script %s ****", script) finally: # Close the redirected test-result log files for test_domain in test_domains.values(): logfile = test_domain.console.output() logfile.close() # Always disconnect from the test domains. logger.info("closing all test domains") for test_domain in test_domains.values(): logfile = test_domain.console.output() if logfile: logfile.close() test_domain.close() # The test finished. Aborts such as a failed boot, or a timeout, # will skip all the below. result = post.mortem(test, args, domain_prefix=domain_prefix, finished=True, update=True) logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.issues and " ", result.issues, suffix) # Since the test finished (resolved in POSIX terminology), # emit enough JSON to fool scripts like pluto-testlist-scan.sh. # # A test that timed-out or crashed isn't considered resolved. # # A more robust way of doing this would be to mark each of the # console logs as complete as it is closed. # # More detailed information can be extracted from the debug.log. RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.status, jsonutil.result.result: result, jsonutil.result.issues: result.issues, jsonutil.result.hosts: test.host_names, jsonutil.result.time: jsonutil.ftime(test_total_time.start), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), jsonutil.result.total_time: round(test_total_time.seconds(), 1), } j = jsonutil.dumps(RESULT) logger.info("filling '%s' with json: %s", test.result_file(), j) with open(test.result_file(), "w") as f: f.write(j) f.write("\n") test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
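# --- illustrative sketch (not part of the suite) ---------------------
# Shape of the RESULT record written above, using plain stdlib json in
# place of the project's jsonutil helpers; the field values are made
# up for the example.
import json
record = {
    "testname": "basic-pluto-01",  # hypothetical test name
    "expect": "good",
    "result": "passed",
    "boot_time": 35.2,
    "script_time": 12.9,
}
with open("RESULT", "w") as f:
    f.write(json.dumps(record))
    f.write("\n")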
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) with logger.time("processing test %s", test_prefix) as test_total_time: ignored, include_ignored, details = ignore.test(logger, args, test) if ignored and not include_ignored: result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") # No need to log all the ignored tests when an explicit # sub-set of tests is being run. For instance, when running # just one test. if not args.test_name: logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Be lazy when gathering the results; don't run the sanitizer or # diff. # # XXX: There is a bug here where the only difference is white # space. The test will show up as failed when it previously # showed up as a white-space pass. # # The presence of the RESULT file is a proxy for detecting that # the test was incomplete. old_result = post.mortem(test, args, test_finished=None, skip_diff=True, skip_sanitize=True) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) return if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create the OUTPUT/ directory; if it already exists, move any # contents to BACKUP/. Do it file-by-file so that, at no # point, the OUTPUT/ directory is missing (presence of OUTPUT/ # implies the test was started). # # By backing up each test just before it is started, a trail # of what tests were attempted during each run is created. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a test # is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the time # taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is not # under testing/pluto/ this problem is avoided. try: os.mkdir(test.output_directory) except FileExistsError: backup_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, copy it. os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug log # writing to that directory; include timing for this test run. 
with logger.debug_time("testing %s", test_prefix, logfile=os.path.join(test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: logger.exception("timeout while booting domains for test %s", test.name) # Bail before RESULT is written - being unable to # boot the domains is a disaster. return # Run the scripts directly with logger.time("running scripts %s", " ".join(("%s:%s" % (host, script)) for host, script in test.host_script_tuples)) as test_script_time: try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join(test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.output(open(output, "w")) for host, script in test.host_script_tuples: if args.stop_at == script: logger.error("stopping test run at (before executing) script %s", script) break test_domain = test_domains[host] test_domain.read_file_run(script) result = post.mortem(test, args, test_finished=True, update=True) except pexpect.TIMEOUT as e: logger.exception("**** timed out while running script %s ****", script) # Still perform post-mortem so that errors are # captured, but force the result to # incomplete. result = post.mortem(test, args, test_finished=False, update=True) finally: # Close the redirected test-result log files for test_domain in test_domains.values(): logfile = test_domain.console.output() logfile.close() # Always disconnect from the test domains. logger.info("closing all test domains") for test_domain in test_domains.values(): logfile = test_domain.console.output() if logfile: logfile.close() test_domain.close() # Above will have set RESULT. Exceptions such as control-c or # a crash bypass this code. logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.errors and " ", result.errors, suffix) # Since the test finished, emit enough JSON to fool scripts like # pluto-testlist-scan.sh. # # This also leaves a simple marker to indicate that the test # finished. # # A more robust way of doing this would be to mark each of the # console logs as complete as it is closed. # # More detailed information can be extracted from the debug.log. hosts = {} for host in sorted(test.host_names): if host in result.errors: hosts[host] = [error for error in result.errors[host]] else: hosts[host] = ["passed"] RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.expected_result, jsonutil.result.result: str(result), jsonutil.result.time: jsonutil.ftime(datetime.now()), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), jsonutil.result.total_time: round(test_total_time.seconds(), 1), jsonutil.result.hosts: hosts, } j = jsonutil.dumps(RESULT) logger.info("filling '%s' with json: %s", test.result_file(), j) with open(test.result_file(), "w") as f: f.write(j) f.write("\n") test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
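# --- illustrative sketch (not part of the suite) ---------------------
# The OUTPUT/-to-BACKUP/ move used by both _process_test variants
# above: relocate entries one at a time so OUTPUT/ itself never
# disappears (its existence is what marks a test as started). The
# paths and helper name are hypothetical.
import os
def backup_output(output_dir, backup_dir):
    os.makedirs(backup_dir, exist_ok=True)  # keep even an "empty" OUTPUT/
    for name in os.listdir(output_dir):
        src = os.path.join(output_dir, name)
        dst = os.path.join(backup_dir, name)
        os.replace(src, dst)  # a rename; atomic within one filesystem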
def __init__(self, test_directory, testing_directory, saved_test_output_directory, testsuite_output_directory, kind="kvmplutotest", expected_result="good"): self.logger = logutil.getLogger(__name__) # basics self.kind = kind self.expected_result = expected_result # The test's name is always identical to the test directory's # name (aka basename). However, since TEST_DIRECTORY could be # relative (for instance "." or "./..") it first needs to be # made absolute before the basename can be extracted. test_directory = os.path.realpath(test_directory) # The test's name is the same as the directory's basename. self.name = os.path.basename(test_directory) self.full_name = "test " + self.name # Construct the test's relative directory path such that it # always contains the test directory name (i.e., the test # name) as context. For instance: "." gets rewritten as # ../<test>; and ".." gets rewritten as "../../<test>". This # ensures that displayed paths always include some context. # For instance, given "kvmresult.py .", "../<test> passed" # (and not ". passed") will be displayed. self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name) # Directory where the next test run's output should be # written. If a common testsuite output directory was # specified, use that. self.output_directory = ( testsuite_output_directory and os.path.join(testsuite_output_directory, self.name) or os.path.join(self.directory, "OUTPUT")) self.result_file = os.path.join(self.output_directory, "RESULT") # Directory containing saved output from a previous test run. # If the test's output directory was explicitly specified, say # as a parameter to kvmrunner.py viz: # # kvmresults.py testing/pluto/<test>/OUTPUT.OLD # kvmresults.py testing/pluto/OUTPUT/<test> # # then that directory, and not the next output-directory, will # be passed in and saved here. Otherwise it is None, and the # OUTPUT_DIRECTORY should be used. self.saved_output_directory = saved_test_output_directory # An instance of the test directory within a tree that # includes all the post-mortem sanitization scripts. If the # test results have been copied then this will be different # from test.directory. if testing_directory: self.sanitize_directory = os.path.realpath(os.path.join(testing_directory, "pluto", self.name)) else: for sanitize_directory in [self.directory, utils.directory("..", "pluto", self.name)]: # Tentative self.sanitize_directory = os.path.realpath(sanitize_directory) self.logger.debug("is '%s' a test sanitize directory?" % self.sanitize_directory) for path in [self.sanitize_directory, os.path.join(self.sanitize_directory, "..", "..", "default-testparams.sh"), os.path.join(self.sanitize_directory, "..", "..", "sanitizers")]: if not os.path.exists(path): self.logger.debug("test sanitize directory '%s' missing", path) self.sanitize_directory = None break # will be filled in later self.domains = None self.initiators = None
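# --- illustrative sketch (not part of the suite) ---------------------
# The path juggling in __init__ above, in isolation: realpath+basename
# pin down the test name, and re-joining relpath(dirname) with that
# name guarantees displayed paths always end in the test directory.
import os
test_directory = os.path.realpath(".")  # e.g. run from inside a test
name = os.path.basename(test_directory)
directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), name)
print(directory)  # "." becomes "../<test>", ".." becomes "../../<test>"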
def __init__(self, testsuite): self.logger = logutil.getLogger(__name__) self.testsuite = testsuite self.test_list = open(testsuite.testlist, 'r')
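# --- illustrative sketch (not part of the suite) ---------------------
# A hypothetical companion to the iterator above: yield one stripped,
# non-comment line per test and close the handle deterministically,
# instead of holding the TESTLIST file open for the object's lifetime.
def iter_testlist(testlist_path):
    with open(testlist_path, "r") as testlist:
        for line in testlist:
            line = line.strip()
            if line and not line.startswith("#"):
                yield line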
def main(): parser = argparse.ArgumentParser(description="Run tests") # This argument's behaviour is overloaded; the shorter word "try" # is a python word. parser.add_argument("--retry", type=int, metavar="COUNT", help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests") parser.add_argument("--dry-run", "-n", action="store_true") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument("--output-directory", default=None, metavar="DIRECTORY", help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT") parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="either a testsuite directory or a list of test directories") testsuite.add_arguments(parser) runner.add_arguments(parser) post.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmrunner") logger.info("Options:") logger.info(" retry: %s", args.retry or "0 (default)") logger.info(" dry-run: %s", args.dry_run) logger.info(" output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)") logger.info(" directories: %s", args.directories) testsuite.log_arguments(logger, args) runner.log_arguments(logger, args) post.log_arguments(logger, args) logutil.log_arguments(logger, args) tests = testsuite.load_testsuite_or_tests(logger, args.directories, args, testsuite_output_directory=args.output_directory, log_level=logutil.INFO) if not tests: logger.error("test or testsuite directory invalid: %s", args.directories) return 1 # If a list of test directories was specified (i.e., not a # testsuite), force the tests to run. if isinstance(tests, list) and args.retry is None: args.retry = 1 logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry) # Use a default dict so no need to worry about initializing values # to zero. stats = Stats() results = Results() start_time = time.localtime() try: logger.info("run started at %s", datetime.now()) test_count = 0 for test in tests: stats.add("total", test) test_count += 1 # Would the number of tests to be [re]run be better? test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests)) ignore = testsuite.ignore(test, args) if ignore: stats.add("ignored", test) # No need to log all the ignored tests when an # explicit sub-set of tests is being run. For # instance, when running just one test. if not args.test_name: logger.info("%s: ignore (%s)", test_prefix, ignore) continue # Implement "--retry" as described above: if retry is -ve, # the test is always run; if there's no result, the test # is always run; skip passed tests; else things get a # little weird. 
retry = args.retry or 0 if retry >= 0: result = post.mortem(test, args) if result: if result.passed: logger.info("%s: passed", test_prefix) stats.add("skipped", test) results.add(result) continue if retry == 0: logger.info("%s: %s (delete '%s' to re-test)", test_prefix, result, test.output_directory) stats.add("skipped", test) results.add(result) continue stats.add("retry", test) logger.info("%s: starting ...", test_prefix) stats.add("tests", test) debugfile = None result = None # At least one iteration; above will have filtered out # skips and ignores attempts = max(abs(retry), 1) for attempt in range(attempts): stats.add("attempts", test) # On first attempt (attempt == 0), empty the # <test>/OUTPUT/ directory of all contents. On # subsequent attempts, move the files from the # previous attempt to <test>/OUTPUT/<attempt>/. # # XXX: Don't just delete the OUTPUT/ directory as # this, for a short period, changes the status of the # test to never-run. # # XXX: During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time # a test is added and/or a test is run (adding files # under <test>/OUTPUT), the boot process (and # consequently the time taken to run a test) keeps # increasing. # # Mitigate this slightly by emptying <test>/OUTPUT # before starting any test attempts. It's assumed # that the previous test run was already captured # above with save-directory. if not args.dry_run: try: os.mkdir(test.output_directory) except FileExistsError: saved_output_directory = os.path.join(test.output_directory, str(attempt)) logger.info("emptying directory '%s'", test.output_directory) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) if attempt == 0: logger.debug(" remove '%s'", src) if os.path.isfile(src): os.remove(src) else: shutil.rmtree(src) elif os.path.isfile(src): dst = os.path.join(saved_output_directory, name) logger.debug(" move '%s' to '%s'", src, dst) os.makedirs(saved_output_directory, exist_ok=True) os.rename(src, dst) # Start a debug log in the OUTPUT directory; include # timing for this specific test attempt. with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")): logger.info("****** test %s attempt %d of %d started at %s ******", test.name, attempt+1, attempts, datetime.now()) ending = "undefined" try: if not args.dry_run: runner.run_test(test, max_workers=args.workers) ending = "finished" result = post.mortem(test, args, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh. logger.info("storing result in '%s'", test.result_file) with open(test.result_file, "w") as f: f.write('"result": "%s"\n' % result) except pexpect.TIMEOUT as e: ending = "timeout" logger.exception("**** test %s timed out ****", test.name) result = post.mortem(test, args, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to # here should have a non-null RESULT. stats.add("attempts(%s:%s)" % (ending, result), test) logger.info("****** test %s %s ******", test.name, result) if result.passed: break # Above will have set RESULT (don't reach here during # cntrl-c or crash). 
results.add(result) stats.add("tests(%s)" % result, test) except KeyboardInterrupt: logger.exception("**** test %s interrupted ****", test.name) return 1 finally: logger.info("run finished at %s", datetime.now()) level = args.verbose and logutil.INFO or logutil.DEBUG logger.log(level, "stat details:") stats.log_details(logger, level=level, prefix=" ") logger.info("result details:") results.log_details(logger, level=logutil.INFO, prefix=" ") logger.info("stat summary:") stats.log_summary(logger, level=logutil.INFO, prefix=" ") logger.info("result summary:") results.log_summary(logger, level=logutil.INFO, prefix=" ") return 0
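# --- illustrative sketch (not part of the suite) ---------------------
# The --retry selection rules from the help text above, distilled:
# a negative COUNT runs everything, zero runs only not-started tests,
# a positive COUNT also re-runs incomplete/failed ones; passed tests
# are only re-run when COUNT is negative.
def should_run(retry, old_result):
    if retry < 0:
        return True
    if old_result is None:  # not started (no OUTPUT directory)
        return True
    if old_result.passed:
        return False
    return retry > 0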
def main(): parser = argparse.ArgumentParser(description="Run tests") # This argument's behaviour is overloaded; the shorter word "try" # is a python word. parser.add_argument("--retry", type=int, metavar="COUNT", default=1, help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)") parser.add_argument("--attempts", type=int, default=1, help="number of times to attempt a test before giving up; default %(default)s") parser.add_argument("--dry-run", "-n", action="store_true") parser.add_argument("--verbose", "-v", action="count", default=0) # Default to BACKUP under the current directory. Name is # arbitrary, chosen for its hopefully unique first letter # (avoiding Makefile, OBJ, README, ... :-). parser.add_argument("--backup-directory", metavar="DIRECTORY", default=os.path.join("BACKUP", time.strftime("%Y%m%d%H%M%S", time.localtime())), help="backup existing <test>/OUTPUT to %(metavar)s/<test> (default: %(default)s)") parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="either a testsuite directory or a list of test directories") testsuite.add_arguments(parser) runner.add_arguments(parser) post.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmrunner") logger.info("Options:") logger.info(" retry: %s", args.retry) logger.info(" attempts: %s", args.attempts) logger.info(" dry-run: %s", args.dry_run) logger.info(" backup-directory: %s", args.backup_directory) logger.info(" directories: %s", args.directories) testsuite.log_arguments(logger, args) runner.log_arguments(logger, args) post.log_arguments(logger, args) logutil.log_arguments(logger, args) tests = testsuite.load_testsuite_or_tests(logger, args.directories, args, log_level=logutil.INFO) if not tests: logger.error("test or testsuite directory invalid: %s", args.directories) return 1 test_stats = stats.Tests() result_stats = stats.Results() start_time = datetime.now() try: logger.info("run started at %s", start_time) test_count = 0 for test in tests: test_stats.add("total", test) test_count += 1 # Would the number of tests to be [re]run be better? test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests)) ignore = testsuite.ignore(test, args) if ignore: result_stats.add_ignore(test, ignore) test_stats.add("ignored", test) # No need to log all the ignored tests when an # explicit sub-set of tests is being run. For # instance, when running just one test. if not args.test_name: logger.info("%s: ignore (%s)", test_prefix, ignore) continue # Implement "--retry" as described above: if retry is -ve, # the test is always run; if there's no result, the test # is always run; skip passed tests; else things get a # little weird. # Be lazy when gathering the results; don't run the # sanitizer or diff. old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True) if args.retry >= 0: if old_result: if old_result.passed: logger.info("%s: passed", test_prefix) test_stats.add("skipped", test) result_stats.add_skip(old_result) continue if args.retry == 0: logger.info("%s: %s (delete '%s' to re-test)", test_prefix, old_result, test.output_directory) test_stats.add("skipped", test) result_stats.add_skip(old_result) continue test_stats.add("retry", test) logger.info("%s: starting ...", test_prefix) test_stats.add("tests", test) # Move the contents of the existing OUTPUT directory to # BACKUP_DIRECTORY. 
Do it file-by-file so that, at no # point, the directory is empty. # # By moving each test just before it is started a trail of # what tests were attempted at each run is left. # # XXX: During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a # test is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the # time taken to run a test) keeps increasing. # # Always moving the directory contents to the # BACKUP_DIRECTORY mitigates this some. saved_output_directory = None if os.path.exists(test.output_directory): saved_output_directory = os.path.join(args.backup_directory, test.name) logger.info("moving contents of '%s' to '%s'", test.output_directory, saved_output_directory) # Copy "empty" OUTPUT directories too. args.dry_run or os.makedirs(saved_output_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(saved_output_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) debugfile = None result = None # At least one iteration; above will have filtered out # skips and ignores for attempt in range(args.attempts): test_stats.add("attempts", test) # Create the OUTPUT directory. try: if not args.dry_run: os.mkdir(test.output_directory) elif os.path.exists(test.output_directory): raise FileExistsError() except FileExistsError: # On first attempt, the OUTPUT directory will # be empty (see above) so no need to save. if attempt > 0: saved_output_directory = os.path.join(test.output_directory, str(attempt)) logger.info("moving contents of '%s' to '%s'", test.output_directory, saved_output_directory) args.dry_run or os.makedirs(saved_output_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) if os.path.isfile(src): dst = os.path.join(saved_output_directory, name) logger.debug("moving '%s' to '%s'", src, dst) args.dry_run or os.replace(src, dst) # Start a debug log in the OUTPUT directory; include # timing for this specific test attempt. with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")): logger.info("****** test %s attempt %d of %d started at %s ******", test.name, attempt+1, args.attempts, datetime.now()) if saved_output_directory: logger.info("contents of '%s' moved to '%s'", test.output_directory, saved_output_directory) saved_output_directory = None ending = "undefined" try: if not args.dry_run: runner.run_test(test, max_workers=args.workers) ending = "finished" result = post.mortem(test, args, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh. logger.info("storing result in '%s'", test.result_file) with open(test.result_file, "w") as f: f.write('"result": "%s"\n' % result) except pexpect.TIMEOUT as e: ending = "timeout" logger.exception("**** test %s timed out ****", test.name) result = post.mortem(test, args, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to # here should have a non-null RESULT. test_stats.add("attempts(%s:%s)" % (ending, result), test) if result.errors: logger.info("****** test %s %s %s ******", test.name, result, result.errors) else: logger.info("****** test %s %s ******", test.name, result) if result.passed: break # Above will have set RESULT. During a control-c or crash # the below will not be executed. 
test_stats.add("tests(%s)" % result, test) result_stats.add_result(result, old_result) test_stats.log_summary(logger.info, header="updated stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated results:", prefix=" ") except KeyboardInterrupt: logger.exception("**** test %s interrupted ****", test.name) return 1 level = args.verbose and logger.info or logger.debug test_stats.log_details(level, header="stat details:", prefix=" ") result_stats.log_details(logger.info, header="result details:", prefix=" ") test_stats.log_summary(logger.info, header="stat summary:", prefix=" ") result_stats.log_summary(logger.info, header="result summary:", prefix=" ") end_time = datetime.now() logger.info("run finished at %s after %s", end_time, end_time - start_time) return 0
def main(): parser = argparse.ArgumentParser(description="Run tests") # This argument's behaviour is overloaded; the shorter word "try" # is a python word. parser.add_argument( "--retry", type=int, metavar="COUNT", help= "number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests" ) parser.add_argument("--dry-run", "-n", action="store_true") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument( "--output-directory", default=None, metavar="DIRECTORY", help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT" ) parser.add_argument( "directories", metavar="DIRECTORY", nargs="+", help="either a testsuite directory or a list of test directories") testsuite.add_arguments(parser) runner.add_arguments(parser) post.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmrunner") logger.info("Options:") logger.info(" retry: %s", args.retry or "0 (default)") logger.info(" dry-run: %s", args.dry_run) logger.info(" output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)") logger.info(" directories: %s", args.directories) testsuite.log_arguments(logger, args) runner.log_arguments(logger, args) post.log_arguments(logger, args) logutil.log_arguments(logger, args) tests = testsuite.load_testsuite_or_tests( logger, args.directories, args, testsuite_output_directory=args.output_directory, log_level=logutil.INFO) if not tests: logger.error("test or testsuite directory invalid: %s", args.directories) return 1 # If a list of test directories was specified (i.e., not a # testsuite), force the tests to run. if isinstance(tests, list) and args.retry is None: args.retry = 1 logger.info( "Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry) # Use a default dict so no need to worry about initializing values # to zero. stats = Stats() results = Results() start_time = time.localtime() try: logger.info("run started at %s", datetime.now()) test_count = 0 for test in tests: stats.add("total", test) test_count += 1 # Would the number of tests to be [re]run be better? test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests)) ignore = testsuite.ignore(test, args) if ignore: stats.add("ignored", test) # No need to log all the ignored tests when an # explicit sub-set of tests is being run. For # instance, when running just one test. if not args.test_name: logger.info("%s: ignore (%s)", test_prefix, ignore) continue # Implement "--retry" as described above: if retry is -ve, # the test is always run; if there's no result, the test # is always run; skip passed tests; else things get a # little weird. 
retry = args.retry or 0 if retry >= 0: result = post.mortem(test, args) if result: if result.passed: logger.info("%s: passed", test_prefix) stats.add("skipped", test) results.add(result) continue if retry == 0: logger.info("%s: %s (delete '%s' to re-test)", test_prefix, result, test.output_directory) stats.add("skipped", test) results.add(result) continue stats.add("retry", test) logger.info("%s: starting ...", test_prefix) stats.add("tests", test) debugfile = None result = None # At least one iteration; above will have filtered out # skips and ignores attempts = max(abs(retry), 1) for attempt in range(attempts): stats.add("attempts", test) # Create an output directory. If there's already an # existing OUTPUT directory copy its contents to: # # OUTPUT/YYYYMMDDHHMMSS.ATTEMPT # # so, when re-running, earlier attempts are saved. Do # this before the OUTPUT/debug.log is started so that # each test attempt has its own log, and otherwise, it # too would be moved away. saved_output_directory = None saved_output = [] if not args.dry_run: try: os.mkdir(test.output_directory) except FileExistsError: # Include the time this test run started in # the suffix - that way all saved results can # be matched using a wild card. saved_output_directory = os.path.join( test.output_directory, "%s.%d" % (time.strftime( "%Y%m%d%H%M%S", start_time), attempt)) logger.debug("moving existing OUTPUT to '%s'", saved_output_directory) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(saved_output_directory, name) if os.path.isfile(src): os.makedirs(saved_output_directory, exist_ok=True) os.rename(src, dst) saved_output.append(name) logger.debug(" moved '%s' to '%s'", src, dst) # Start a debug log in the OUTPUT directory; include # timing for this specific test attempt. with logutil.TIMER, logutil.Debug( logger, os.path.join(test.output_directory, "debug.log")): logger.info( "****** test %s attempt %d of %d started at %s ******", test.name, attempt + 1, attempts, datetime.now()) # Add a log message about any saved output # directory to the per-test-attempt debug log. It # just looks better. if saved_output: logger.info("saved existing '%s' in '%s'", saved_output, saved_output_directory) ending = "undefined" try: if not args.dry_run: runner.run_test(test, max_workers=args.workers) ending = "finished" result = post.mortem(test, args, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh. logger.info("storing result in '%s'", test.result_file) with open(test.result_file, "w") as f: f.write('"result": "%s"\n' % result) except pexpect.TIMEOUT as e: ending = "timeout" logger.exception("**** test %s timed out ****", test.name) result = post.mortem(test, args, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to # here should have a non-null RESULT. stats.add("attempts(%s:%s)" % (ending, result), test) logger.info("****** test %s %s ******", test.name, result) if result.passed: break # Above will have set RESULT (don't reach here during # cntrl-c or crash). 
results.add(result) stats.add("tests(%s)" % result, test) except KeyboardInterrupt: logger.exception("**** test %s interrupted ****", test.name) return 1 finally: logger.info("run finished at %s", datetime.now()) level = args.verbose and logutil.INFO or logutil.DEBUG logger.log(level, "stat details:") stats.log_details(logger, level=level, prefix=" ") logger.info("result details:") results.log_details(logger, level=logutil.INFO, prefix=" ") logger.info("stat summary:") stats.log_summary(logger, level=logutil.INFO, prefix=" ") logger.info("result summary:") results.log_summary(logger, level=logutil.INFO, prefix=" ") return 0
def main(): parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]", epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored). While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately). If a BASELINE directory is specified, any test whose result differs from the baseline is also flagged.") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument("--quick", action="store_true", help=("Use the previously generated '.console.txt' and '.console.diff' files")) parser.add_argument("--quick-sanitize", action="store_true", help=("Use the previously generated '.console.txt' file")) parser.add_argument("--quick-diff", action="store_true", help=("Use the previously generated '.console.diff' file")) parser.add_argument("--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files")) parser.add_argument("--update-sanitize", action="store_true", help=("Update the '.console.txt' file")) parser.add_argument("--update-diff", action="store_true", help=("Update the '.console.diff' file")) parser.add_argument("--prefix-directory", action="store_true") parser.add_argument("--prefix-name", action="store_true") parser.add_argument("--prefix-output-directory", action="store_true") parser.add_argument("--print-result", action="store_true") parser.add_argument("--print-diff", action="store_true") parser.add_argument("--print-args", action="store_true") parser.add_argument("--print-scripts", action="store_true") parser.add_argument("--print-domains", action="store_true") parser.add_argument("--print-initiators", action="store_true") parser.add_argument("--stats", action="store", default="summary", choices=["details", "summary", "none"], help="provide overview statistics; default: \"%(default)s\"") parser.add_argument("--list-ignored", action="store_true", help="include ignored tests in the list") parser.add_argument("--list-untested", action="store_true", help="include untested tests in the list") parser.add_argument("--baseline", metavar="DIRECTORY", help="a %(metavar)s containing baseline testsuite output") parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), test output, or testsuite output") # Note: this argument serves as documentation only. The # TEST-DIRECTORY argument always consumes all remaining arguments. parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?", help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run") post.add_arguments(parser) testsuite.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmresults") # default to printing results if not args.print_scripts \ and not args.print_result \ and not args.print_diff \ and not args.print_initiators \ and not args.print_domains: args.print_result = True # The option -vvvvvvv is a short circuit for these; make # re-ordering easy by using V as a counter. 
v = 0 args.prefix_directory = args.prefix_directory or args.verbose > v args.prefix_name = args.prefix_name or args.verbose > v args.print_result = args.print_result or args.verbose > v v += 1 args.prefix_output_directory = args.prefix_output_directory or args.verbose > v v += 1 args.list_untested = args.list_untested or args.verbose > v args.list_ignored = args.list_ignored or args.verbose > v v += 1 args.print_scripts = args.print_scripts or args.verbose > v v += 1 args.print_args = args.print_args or args.verbose > v if args.print_args: post.log_arguments(logger, args) testsuite.log_arguments(logger, args) logutil.log_arguments(logger, args) return 1 # Try to find a baseline. If present, pre-load it. baseline = None if args.baseline: # An explicit baseline testsuite can be loaded more forgivingly. baseline = testsuite.load(logger, args, testsuite_directory=args.baseline, testsuite_output_directory=None, error_level=logutil.DEBUG) if not baseline: # Assume that it is baseline output only. if args.testing_directory: baseline_directory = os.path.join(args.testing_directory, "pluto") else: baseline_directory = utils.directory("..", "pluto") baseline = testsuite.load(logger, args, testsuite_directory=baseline_directory, testsuite_output_directory=args.baseline, error_level=logutil.DEBUG) if not baseline: logger.info("'%s' is not a baseline", args.baseline) return 1 elif len(args.directories) > 1: # If there is more than one directory then, perhaps, the last # one is a baseline. A baseline might be: a complete # testsuite snapshot; or just output saved as # testing/pluto/OUTPUT/TESTDIR. baseline = testsuite.load(logger, args, testsuite_directory=args.directories[-1], testsuite_output_directory=None, error_level=logutil.DEBUG) if baseline: # discard the last argument as consumed above. logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1]) args.directories.pop() tests = testsuite.load_testsuite_or_tests(logger, args.directories, args) # And check if not tests: logger.error("Invalid testsuite or test directories") return 1 # When an explicit list of directories was specified always print # all of them (otherwise, tests seem to get lost). if isinstance(tests, list): args.list_untested = True result_stats = stats.Results() try: results(logger, tests, baseline, args, result_stats) finally: if args.stats == "details": result_stats.log_details(stderr_log, header="Details:", prefix=" ") if args.stats in ["details", "summary"]: result_stats.log_summary(stderr_log, header="Summary:", prefix=" ") return 0
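# --- illustrative sketch (not part of the suite) ---------------------
# The "-v unlocks more output per level" pattern above: compare the
# count against an incrementing threshold so tiers can be re-ordered
# simply by moving lines. The option names here are hypothetical.
def apply_verbosity(args):
    v = 0
    args.prefix_name = args.prefix_name or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    args.print_scripts = args.print_scripts or args.verbose > v
    return args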
def __init__(self, test_directory, testing_directory, saved_test_output_directory=None, saved_testsuite_output_directory=None, testsuite_output_directory=None, kind="kvmplutotest", expected_result="good"): self.logger = logutil.getLogger(__name__) # basics self.kind = kind self.expected_result = expected_result # The test's name is always identical to the test directory's # name (aka basename). However, since TEST_DIRECTORY could be # relative (for instance "." or "./..") it first needs to be # made absolute before the basename can be extracted. test_directory = os.path.realpath(test_directory) # The test's name is the same as the directory's basename. self.name = os.path.basename(test_directory) self.full_name = "test " + self.name # Construct the test's relative directory path such that it # always contains the test directory name (i.e., the test # name) as context. For instance: "." gets rewritten as # ../<test>; and ".." gets rewritten as "../../<test>". This # ensures that displayed paths always include some context. # For instance, given "kvmresult.py .", "../<test> passed" # (and not ". passed") will be displayed. self.directory = os.path.join(os.path.relpath(os.path.dirname(test_directory)), self.name) # Directory where the next test run's output should be # written. If a common testsuite output directory was # specified, use that. if testsuite_output_directory: self.output_directory = os.path.join(testsuite_output_directory, self.name) else: self.output_directory = os.path.join(self.directory, "OUTPUT") # Directory containing saved output from a previous test run. # If the test's output directory was explicitly specified, say # as a parameter to kvmrunner.py viz: # # kvmresults.py testing/pluto/<test>/OUTPUT.OLD # kvmresults.py testing/pluto/OUTPUT/<test> # # then that directory, and not the next output-directory, will # be passed in and saved here. Otherwise it is None, and the # OUTPUT_DIRECTORY should be used. if saved_test_output_directory: self.saved_output_directory = saved_test_output_directory elif saved_testsuite_output_directory: self.saved_output_directory = os.path.join(saved_testsuite_output_directory, self.name) else: self.saved_output_directory = None # An instance of the test directory within a tree that # includes all the post-mortem sanitization scripts. If the # test results have been copied then this will be different # from test.directory. self.sanitize_directory = os.path.realpath(os.path.join(testing_directory, "pluto", self.name)) # Get an ordered list of (host,script) pairs of all the # scripts that need to be run. self.host_script_tuples = scripts.host_script_tuples(self.directory) # Just assume any host mentioned in scripts needs to run. self.host_names = set() for host, script in self.host_script_tuples: self.host_names.add(host)
def main(): parser = argparse.ArgumentParser(description="Run tests") # This argument's behaviour is overloaded; the shorter word "try" # is a python word. parser.add_argument("--retry", type=int, metavar="COUNT", help=("number of times a test should be attempted before giving up" " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)" "; a negative %(metavar)s selects all tests" "; a zero %(metavar)s selects not-started tests" "; a positive %(metavar)s selects not-started, incomplete and failing tests" "; default is to select not-started tests")) parser.add_argument("--dry-run", "-n", action="store_true") parser.add_argument("--verbose", "-v", action="count", default=0) parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help=("Either a testsuite directory or" " a list of test directories")) testsuite.add_arguments(parser) runner.add_arguments(parser) post.add_arguments(parser) logutil.add_arguments(parser) args = parser.parse_args() logutil.config(args) logger = logutil.getLogger("kvmrunner") logger.info("Options:") logger.info(" retry: %s", args.retry or "0 (default)") logger.info(" dry-run: %s", args.dry_run) logger.info(" directories: %s", args.directories) testsuite.log_arguments(logger, args) runner.log_arguments(logger, args) post.log_arguments(logger, args) logutil.log_arguments(logger, args) tests = testsuite.load_testsuite_or_tests(logger, args.directories, log_level=logutil.INFO) if not tests: logger.error("test or testsuite directory invalid: %s", args.directories) return 1 # If a list of test directories was specified (i.e., not a # testsuite), force the tests to run. if isinstance(tests, list) and args.retry is None: args.retry = 1 logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry) # Use a default dict so no need to worry about initializing values # to zero. stats = Stats() results = Results() start_time = time.localtime() try: logger.info("run started at %s", datetime.now()) for test in tests: stats.add("total", test) ignore = testsuite.ignore(test, args) if ignore: stats.add("ignored", test) # No need to log all the ignored tests when an # explicit sub-set of tests is being run. For # instance, when running just one test. if not args.test_name: logger.info("*** %s: ignore (%s)", test.name, ignore) continue # Implement "--retry" as described above: if retry is -ve, # the test is always run; if there's no result, the test # is always run; skip passed tests; else things get a # little weird. retry = args.retry or 0 if retry >= 0: result = post.mortem(test, args) if result: if result.passed: logger.info("*** %s: passed", test.name) stats.add("skipped", test) results.add(result) continue if retry == 0: logger.info("*** %s: %s (delete '%s' to re-test)", test.name, result.value, test.output_directory) stats.add("skipped", test) results.add(result) continue stats.add("tests", test) debugfile = None result = None # At least one iteration; above will have filtered out # skips and ignores runs = max(abs(retry), 1) for run in range(runs): stats.add("runs", test) # Create an output directory. If there's already an # existing OUTPUT directory rename it to OUTPUT... # Need to do this before the OUTPUT/debug.log is # started as otherwise it too would get moved away. 
saved_output_directory = None if not args.dry_run: try: os.mkdir(test.output_directory) except FileExistsError: stats.add("reruns", test) # Include the time this test run started in # the suffix - that way all saved results can # be matched using a wild card. Include the # time the directory was last modified in the # suffix - it makes a good approximation as to # when the previous test run finished. stat = os.stat(test.output_directory) mtime = time.localtime(os.stat(test.output_directory).st_mtime) saved_output_directory = (test.output_directory + time.strftime(".%Y%m%d%H%M", start_time) + time.strftime(".%H%M%S", mtime)) logger.debug("renaming '%s' to '%s'", test.output_directory, saved_output_directory) os.rename(test.output_directory, saved_output_directory) # if the second attempt fails, let it crash os.mkdir(test.output_directory) # Start a debug log in the OUTPUT directory; include # timing for this specific test run. with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")): logger.info("****** test %s attempt %d of %d started at %s ******", test.name, run+1, runs, datetime.now()) # Add a log message about any saved output # directory to the per-test-run debug log. It # just looks better. if saved_output_directory: logger.info("existing OUTPUT saved in '%s'", saved_output_directory) ending = "undefined" try: if not args.dry_run: runner.run_test(test, max_workers=args.workers) ending = "finished" result = post.mortem(test, args, update=(not args.dry_run)) if not args.dry_run: # Store enough to fool the script # pluto-testlist-scan.sh. logger.info("storing result in '%s'", test.result_file) with open(test.result_file, "w") as f: f.write('"result": "') f.write(result.value) f.write('"') f.write("\n") except pexpect.TIMEOUT as e: ending = "timeout" logger.exception("**** test %s timed out ****", test.name) result = post.mortem(test, args, update=(not args.dry_run)) # Since the OUTPUT directory exists, all paths to # here should have a non-null RESULT. stats.add("runs(%s:%s)" % (ending, result.value), test) logger.info("****** test %s %s ******", test.name, result) if result.passed: break # Above will have set RESULT (don't reach here during # cntrl-c or crash). results.add(result) stats.add("tests(%s)" % result.value, test) except KeyboardInterrupt: logger.exception("**** test %s interrupted ****", test.name) return 1 finally: logger.info("run finished at %s", datetime.now()) level = args.verbose and logutil.INFO or logutil.DEBUG logger.log(level, "stat details:") stats.log_details(logger, level=level, prefix=" ") logger.info("result details:") results.log_details(logger, level=logutil.INFO, prefix=" ") logger.info("stat summary:") stats.log_summary(logger, level=logutil.INFO, prefix=" ") logger.info("result summary:") results.log_summary(logger, level=logutil.INFO, prefix=" ") return 0
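# --- illustrative sketch (not part of the suite) ---------------------
# The rename suffix built in the FileExistsError branch above: the
# start of this run plus the old OUTPUT's mtime (a fair proxy for when
# the previous run finished), so all saved results match one wildcard.
import os, time
def saved_name(output_directory, start_time):
    mtime = time.localtime(os.stat(output_directory).st_mtime)
    return (output_directory
            + time.strftime(".%Y%m%d%H%M", start_time)
            + time.strftime(".%H%M%S", mtime))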
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor): logger = logutil.getLogger(domain_prefix, __name__, test.name) prefix = "******" suffix = "******" test_stats.add(test, "total") test_runtime = test_boot_time = test_script_time = test_post_time = None old_result = None backup_directory = os.path.join(args.backup_directory, test.name) # Would the number of tests to be [re]run be better? test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count) publish.json_status(logger, args, "processing %s" % test_prefix) with logger.time("processing test %s", test_prefix): # Ignoring the test completely? # # So that there's no possible confusion over whether the test # was run, remove any pre-existing output. ignored, details = ignore.test(logger, args, test) if ignored: # The isdir() test followed by a simple move, while # racy, should be good enough. if os.path.isdir(test.output_directory): logger.info("moving '%s' to '%s'", test.output_directory, backup_directory) os.makedirs(os.path.dirname(backup_directory), exist_ok=True) os.rename(test.output_directory, backup_directory) result_stats.add_ignored(test, ignored) test_stats.add(test, "ignored") logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details, suffix) return # Skip the test, leaving old results? # # For instance, during a test re-run, skip any tests that are # passing. # # The check below compares the test and expected output, # ignoring any previous test result. This way the results are # consistent with kvmresults.py which always reflects the # current sources. # # - modifying the expected output so that it no longer matches # the last result is a fail # # - modifying the expected output so that it matches the last # result is a pass old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False) if skip.result(logger, args, old_result): logger.info("%s %s skipped (previously %s) %s", prefix, test_prefix, old_result, suffix) test_stats.add(test, "skipped") result_stats.add_skipped(old_result) publish.everything(logger, args, old_result) return # Running the test ... # # From now on the test will be run, so a post mortem will be # needed. try: if old_result: test_stats.add(test, "tests", "retry") logger.info("%s %s started (previously %s) ....", prefix, test_prefix, old_result) else: test_stats.add(test, "tests", "try") logger.info("%s %s started ....", prefix, test_prefix) test_stats.add(test, "tests") # Create just the OUTPUT/ directory. # # If the directory already exists, copy the contents to # BACKUP/. Do it file-by-file so that, at no point, the # OUTPUT/ directory is missing (having an OUTPUT/ # directory implies the test was started). # # Don't try to create the path. If the parent directory # is missing, this and the entire script will crash. # Someone did something nasty like deleting the parent # directory. # # Backing up each test just before it is started leaves a # trail of what tests were attempted during a test run. # # XXX: # # During boot, swan-transmogrify runs "chcon -R # testing/pluto". Of course this means that each time a # test is added and/or a test is run (adding files under # <test>/OUTPUT), the boot process (and consequently the # time taken to run a test) keeps increasing. # # By moving the directory contents to BACKUP/, which is # not under testing/pluto/ this problem is avoided. 
try: os.mkdir(test.output_directory) except FileExistsError: logger.info("moving contents of '%s' to '%s'", test.output_directory, backup_directory) # Even if OUTPUT/ is empty, move it. os.makedirs(backup_directory, exist_ok=True) for name in os.listdir(test.output_directory): src = os.path.join(test.output_directory, name) dst = os.path.join(backup_directory, name) logger.debug("moving '%s' to '%s'", src, dst) os.replace(src, dst) # Now that the OUTPUT directory is empty, start a debug # log writing to that directory; include timing for this # test run. with logger.debug_time("testing %s", test_prefix, logfile=os.path.join( test.output_directory, "debug.log"), loglevel=logutil.INFO) as test_runtime: # boot the domains with logger.time("booting domains") as test_boot_time: try: test_domains = _boot_test_domains( logger, test, domain_prefix, boot_executor) except pexpect.TIMEOUT: # Bail. Being unable to boot the domains is a # disaster. The test is UNRESOLVED. logger.exception("TIMEOUT while booting domains") return except pexpect.EOF: # Bail. Being unable to attach to the domains # is a disaster. The test is UNRESOLVED. logger.exception("EOF while booting domains") return except: logger.exception("EXCEPTION while booting domains") raise # Run the scripts directly with logger.time("running scripts %s", test.host_scripts) as test_script_time: with tcpdump.Dump( logger, domain_prefix, test.output_directory, [ test_domain.domain for test_domain in test_domains.values() ], enable=args.tcpdump): try: # re-direct the test-result log file for test_domain in test_domains.values(): output = os.path.join( test.output_directory, test_domain.domain.host_name + ".console.verbose.txt") test_domain.console.redirect_output( open(output, "w")) # If a script times out, don't try to run # post-mortem.sh. host_timed_out = None for script in test.host_scripts: test_domain = test_domains[script.host_name] try: test_domain.read_file_run(script.path) except pexpect.TIMEOUT as e: # A timeout while running a test # script is a sign that a command # hung. message = "%s while running script %s" % ( post.Issues.TIMEOUT, script) logger.warning("*** %s ***" % message) test_domain.console.append_output( "%s %s %s", post.LHS, message, post.RHS) host_timed_out = script.host_name break except pexpect.EOF as e: # An EOF while a script is running # is a sign that libvirt crashed. 
message = "%s while running script %s" % ( post.Issues.EOF, script) logger.exception("*** %s ***" % message) test_domain.console.append_output( "%s %s %s", post.LHS, message, post.RHS) host_timed_out = script.host_name break except BaseException as e: # if there is an exception, write # it to the console message = "%s %s while running script %s" % ( post.Issues.EXCEPTION, str(e), script) logger.exception("*** %s ***" % message) test_domain.console.append_output( "\n%s %s %s\n", post.LHS, message, post.RHS) raise if args.run_post_mortem is False: logger.warning( "+++ skipping script post-mortem.sh -- disabled +++" ) elif host_timed_out: logger.warning( "+++ skipping script post-mortem.sh -- %s timed out +++" % (host_timed_out)) else: # None or True script = "../bin/post-mortem.sh" for host_name in test.host_names: test_domain = test_domains[host_name] test_domain.console.append_output( "%s post-mortem %s", post.LHS, post.LHS) logger.info("running %s on %s", script, host_name) try: status = test_domain.console.run( script) if status: logger.error( "%s failed on %s with status %s", script, host_name, status) else: test_domain.console.append_output( "%s post-mortem %s", post.RHS, post.RHS) except pexpect.TIMEOUT as e: # A post-mortem ending with a # TIMEOUT gets treated as a # FAIL. message = "%s while running script %s" % ( post.Issues.TIMEOUT, script) logger.warning("*** %s ***" % message) test_domain.console.append_output( "%s %s %s", post.LHS, message, post.RHS) continue # always teardown except pexpect.EOF as e: # A post-mortem ending with an # EOF gets treated as # unresloved. message = "%s while running script %s" % ( post.Issues.EOF, script) logger.exception("*** %s ***" % message) test_domain.console.append_output( "%s %s %s", post.LHS, message, post.RHS) continue # always teardown except BaseException as e: # if there is an exception, write # it to the console message = "%s %s while running script %s" % ( post.Issues.EXCEPTION, str(e), script) logger.exception(message) test_domain.console.append_output( "\n%s %s %s\n", post.LHS, message, post.RHS) raise for test_domain in test_domains.values(): test_domain.console.append_output(post.DONE) finally: # Close the redirected test-result log files logger.info( "closing all the test domain log files") for test_domain in test_domains.values(): test_domain.console.close_output() # Always disconnect from the test domains. logger.info("closing all the test domains") for test_domain in test_domains.values(): test_domain.close() finally: with logger.time("post-mortem %s", test_prefix): # The test finished; it is assumed that post.mortem # can deal with a crashed test. result = post.mortem(test, args, domain_prefix=domain_prefix) logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.issues and " ", result.issues, suffix) result.save() # If the test was run (a fresh run would delete RESULT) # and finished (resolved in POSIX terminology), emit # enough JSON to fool scripts like pluto-testlist-scan.sh. # # A test that timed-out or crashed, isn't considered # resolved so the file isn't created. # # XXX: this should go away. 
result_file = os.path.join(test.output_directory, "RESULT") if not os.path.isfile(result_file) \ and result.resolution.isresolved(): RESULT = { jsonutil.result.testname: test.name, jsonutil.result.expect: test.status, jsonutil.result.result: result, jsonutil.result.issues: result.issues, jsonutil.result.hosts: test.host_names, jsonutil.result.time: jsonutil.ftime(test_runtime.start), jsonutil.result.runtime: round(test_runtime.seconds(), 1), jsonutil.result.boot_time: round(test_boot_time.seconds(), 1), jsonutil.result.script_time: round(test_script_time.seconds(), 1), } j = jsonutil.dumps(RESULT) logger.debug("filling '%s' with json: %s", result_file, j) with open(result_file, "w") as f: f.write(j) f.write("\n") # Do this after RESULT is created so it too is published. publish.everything(logger, args, result) publish.json_status(logger, args, "finished %s" % test_prefix) test_stats.add(test, "tests", str(result)) result_stats.add_result(result, old_result) # test_stats.log_summary(logger.info, header="updated test stats:", prefix=" ") result_stats.log_summary(logger.info, header="updated test results:", prefix=" ")
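For scripts that consume the RESULT file written above (pluto-testlist-scan.sh is the one named in the comments), each file holds a single JSON object. A minimal sketch of reading one back, assuming the key strings match the jsonutil.result field names used above (the exact keys live in jsonutil and are not shown in this excerpt, so "testname" and "result" here are illustrative):

import json
import os

def read_result(output_directory):
    # RESULT holds one JSON object describing a resolved test run;
    # return None when the test never finished (no file was written).
    result_file = os.path.join(output_directory, "RESULT")
    if not os.path.isfile(result_file):
        return None
    with open(result_file) as f:
        return json.load(f)

# Hypothetical usage (path and key names are assumptions):
# result = read_result("testing/pluto/basic-pluto-01/OUTPUT")
# if result:
#     print(result["testname"], result["result"])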
def main():

    parser = argparse.ArgumentParser(
        description="list test results",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print", action="store",
                        default=Print(Print.path, Print.result, Print.issues),
                        type=Print, metavar=str(Print),
                        help="comma-separated list of attributes to print for each test; default: '%(default)s'")

    parser.add_argument("--stats", action="store", default=Stats.summary,
                        type=Stats, choices=[c for c in Stats],
                        help="provide overview statistics; default: \"%(default)s\"")

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results is displayed"
    parser.add_argument("--baseline", "-b", metavar=baseline_metavar,
                        help=baseline_help)

    parser.add_argument("--json", action="store_true",
                        help="output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well-formed json list)")

    parser.add_argument("directories", metavar="DIRECTORY-OR-FILE", nargs="+",
                        help="a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'")

    # Note: this argument serves as documentation only.  The
    # DIRECTORY-OR-FILE argument above consumes all remaining
    # parameters.
    parser.add_argument("baseline_ignored", nargs="?",
                        metavar=baseline_metavar, help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Baseline: %s", args.baseline)
        logger.info("  Json: %s", args.json)
        logger.info("  Quick: %s", args.quick)
        logger.info("  Update: %s", args.update)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite; can be more forgiving in
        # how it is loaded.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output; magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger, logutil.DEBUG, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log, header="Details:", prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix="  ")

    return 0
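The finally block above hands result_stats a stderr_log callable that is not defined in this excerpt. A plausible minimal sketch, assuming it simply does logging-style "%"-formatting to standard error (the real helper may differ):

import sys

def stderr_log(fmt, *args):
    # Format like the logging module ("%s"-style) but write the
    # stats summaries to stderr so they don't pollute stdout, which
    # carries the per-test result lines.
    print(fmt % args, file=sys.stderr)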
def main():

    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry", type=int, metavar="COUNT",
        help=("number of times a test should be attempted before giving up"
              " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
              "; a negative %(metavar)s selects all tests"
              "; a zero %(metavar)s selects not-started tests"
              "; a positive %(metavar)s selects not-started, incomplete and failing tests"
              "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="*",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s",
                     args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)",
                    args.retry)

    # Use a default dict so there is no need to worry about
    # initializing values to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is
            # negative, the test is always run; if there's no result,
            # the test is always run; skip passed tests; else things
            # get a little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)",
                                    test.name, result.value,
                                    test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue

            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory, rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        mtime = time.localtime(os.stat(test.output_directory).st_mtime)
                        saved_output_directory = (
                            test.output_directory
                            + time.strftime(".%Y%m%d%H%M", start_time)
                            + time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory,
                                     saved_output_directory)
                        os.rename(test.output_directory,
                                  saved_output_directory)
                        # if the second attempt fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(logger,
                                                  os.path.join(test.output_directory,
                                                               "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, run + 1, runs, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test, args,
                                             update=(not args.dry_run))

                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
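The OUTPUT-rename above builds a two-part timestamp suffix: the run's start time groups every directory saved during one run, and the old directory's mtime approximates when the previous run finished. A small self-contained demonstration of the naming scheme and the wild-card matching it enables (paths and the sample mtime are illustrative):

import glob
import time

start_time = time.localtime()        # when this test run started
mtime = time.localtime(1500000000)   # e.g. previous OUTPUT's st_mtime

saved = ("testing/pluto/basic-pluto-01/OUTPUT"
         + time.strftime(".%Y%m%d%H%M", start_time)  # run start: groups runs
         + time.strftime(".%H%M%S", mtime))          # old mtime: ~previous finish
print(saved)  # e.g. .../OUTPUT.202401011230.084000

# All OUTPUTs saved during one run share the first suffix, so a
# single wild card recovers them:
pattern = ("testing/pluto/*/OUTPUT"
           + time.strftime(".%Y%m%d%H%M", start_time) + ".*")
saved_runs = glob.glob(pattern)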
def __init__(self, testsuite, error_level):
    self.error_level = error_level
    self.logger = logutil.getLogger(__name__)
    self.testsuite = testsuite
    self.test_list = open(testsuite.testlist, 'r')
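This constructor opens the TESTLIST file but the iteration side of the class isn't shown in this excerpt. A minimal sketch of the matching methods (they would live on the same class), assuming TESTLIST names one test per line with '#' comments; the real parser may handle more:

def __iter__(self):
    return self

def __next__(self):
    for line in self.test_list:
        line = line.strip()
        # skip blank lines and comments
        if not line or line.startswith("#"):
            continue
        return line
    self.test_list.close()
    raise StopIteration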
def mortem(test, args, domain_prefix="", baseline=None,
           output_directory=None, quick=False):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    test_result = TestResult(logger, test, quick,
                             output_directory=output_directory)

    if not test_result:
        return test_result

    if not baseline:
        return test_result

    # For "baseline", the general idea is that "kvmresults.py | grep
    # baseline" should print something when either a regression or
    # progression has occurred.  For instance:
    #
    # - a test passing but the baseline failing
    #
    # - a test failing but the baseline passing
    #
    # - a test failing, and the baseline failing in a different way
    #
    # What isn't interesting is a test and the baseline failing the
    # same way.

    if not test.name in baseline:
        test_result.issues.add(Issues.ABSENT, "baseline")
        return test_result

    # When loading the baseline results use "quick" so that the
    # original results are used.  This seems to be the best of a bad
    # bunch.
    #
    # Since the baseline was generated using an old sanitizer and
    # reference output, running it through the latest sanitizer
    # scripts (in testing/) can, confusingly, flag baseline results
    # as failures even though the diffs show a pass.
    #
    # OTOH, when this goes to compare the results against the
    # baseline, first putting them through the latest sanitizer tends
    # to result in better diffs.

    base = baseline[test.name]
    baseline_result = TestResult(logger, base, quick=True)

    if not baseline_result.resolution in [test_result.resolution.PASSED,
                                          test_result.resolution.FAILED]:
        test_result.issues.add(str(baseline_result), "baseline")
        return test_result

    if test_result.resolution in [test_result.resolution.PASSED] \
    and baseline_result.resolution in [baseline_result.resolution.PASSED]:
        return test_result

    for host_name in test.host_names:

        # result missing output; still check baseline ...
        if host_name not in test_result.sanitized_output:
            if host_name in baseline_result.sanitized_output:
                if host_name in baseline_result.diffs:
                    test_result.issues.add(Issues.BASELINE_FAILED, host_name)
                else:
                    test_result.issues.add(Issues.BASELINE_PASSED, host_name)
            continue

        if not host_name in baseline_result.sanitized_output:
            test_result.issues.add(Issues.BASELINE_MISSING, host_name)
            continue

        if not host_name in test_result.diffs:
            if host_name in baseline_result.diffs:
                test_result.issues.add(Issues.BASELINE_FAILED, host_name)
            continue

        if not host_name in baseline_result.diffs:
            test_result.issues.add(Issues.BASELINE_PASSED, host_name)
            continue

        baseline_diff = _diff(logger,
                              "BASELINE/" + test.directory + "/" + host_name + ".console.txt",
                              baseline_result.sanitized_output[host_name],
                              "OUTPUT/" + test.directory + "/" + host_name + ".console.txt",
                              test_result.sanitized_output[host_name])
        if baseline_diff:
            baseline_whitespace = _whitespace(baseline_result.sanitized_output[host_name],
                                              test_result.sanitized_output[host_name])
            if baseline_whitespace:
                test_result.issues.add(Issues.BASELINE_WHITESPACE, host_name)
            else:
                test_result.issues.add(Issues.BASELINE_DIFFERENT, host_name)
            # update the diff to something hopefully closer?
            # test_result.diffs[host_name] = baseline_diff
        # else:
        #    test_result.issues.add("baseline-failed", host_name)

    return test_result
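The helpers _diff and _whitespace are called above but defined elsewhere. A rough difflib-based sketch of the behaviour the calls imply, assuming the sanitized output is a list of lines: _diff returns unified-diff lines between two outputs (empty when identical), and _whitespace reports whether the outputs match once whitespace is collapsed. The real implementations may differ:

import difflib

def _diff(logger, ln, l, rn, r):
    # unified diff of two sanitized console outputs;
    # an empty list means the outputs are identical
    diff = list(difflib.unified_diff(l, r, fromfile=ln, tofile=rn,
                                     lineterm=""))
    logger.debug("diff of %s and %s has %d lines", ln, rn, len(diff))
    return diff

def _whitespace(l, r):
    # True when the only differences are whitespace (including
    # blank lines)
    squash = lambda lines: [" ".join(line.split())
                            for line in lines if line.strip()]
    return squash(l) == squash(r)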
def main():

    parser = argparse.ArgumentParser(
        description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+",
                        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining
    # parameters.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?  Suppress any
        # nasty errors.
        basetests = testsuite.load(logger, args.directories[-1],
                                   error_level=logutil.DEBUG)
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)

    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified, always
    # print all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST.
    # Also, passing the full baseline to Test.results() lets that
    # function differentiate between a baseline missing results and
    # a baseline being entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:
        # Produce separate runtimes for each test.
        with logutil.TIMER:
            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.old_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""
            if args.print_name:
                print(sep, end="")
                print(test.name, end="")
                sep = " "
            if args.print_directory:
                print(sep, end="")
                print(test.directory, end="")
                sep = " "
            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "
            if result:
                print(sep, end="")
                print(result, end="")
                sep = " "
            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()
            logger.debug("stop processing test %s", test.name)

    return 0