Example #1
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that this skips baseline-different, where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                        post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(
                sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
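
The XXX comment above notes that, with the default skip=[UNTESTED], deciding whether a test was ever run should only need a check for an OUTPUT/ directory, yet a full post-mortem analysis is performed instead. A minimal sketch of that cheap check (a hypothetical helper, not part of the example):

import os

def looks_untested(test):
    # Hypothetical shortcut suggested by the XXX comment: a test with no
    # OUTPUT/ directory was never run, so a full post-mortem is not
    # needed just to classify it as UNTESTED.
    return not os.path.isdir(test.output_directory)
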
Example #2
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick,
                                 update=args.update)
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            # If there is a baseline; limit what is printed to just
            # those that differ.
            if baseline:
                baseline_issue = False
                for issue in result.issues:
                    if "baseline" in issue:
                        baseline_issue = True
                        break
                if not baseline_issue:
                    continue
            b = args.json and printer.JsonBuilder(
                sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
Example #3
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't; instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test, args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that this skips baseline-different, where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
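
Examples 1-3 select the output builder with the legacy "cond and x or y" idiom. A conditional expression says the same thing without depending on the first branch being truthy; a sketch, assuming the same printer module and args from the examples:

# Equivalent builder selection using a conditional expression.
b = printer.JsonBuilder(sys.stdout) if args.json else printer.TextBuilder(sys.stdout)
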
Example #4
def result(self, reason):
    if self.cached_result:
        return self.cached_result
    self.logger.debug("loading results for %s", reason)
    self.cached_result = post.mortem(self.test, self.args,
                                     baseline=self.baseline,
                                     output_directory=self.test.saved_output_directory,
                                     quick=self.args.quick, update=self.args.update,
                                     finished=None)
    self.result_stats.add_result(self.cached_result)
    return self.cached_result
Example #5
def result(self, reason):
    if self.cached_result:
        return self.cached_result
    self.logger.debug("loading results for %s", reason)
    self.cached_result = post.mortem(
        self.test,
        self.args,
        baseline=self.baseline,
        output_directory=self.test.saved_output_directory,
        quick=self.args.quick,
        update=self.args.update)
    self.result_stats.add_result(self.cached_result)
    return self.cached_result
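
Examples 4 and 5 memoize the expensive post.mortem() call by hand through self.cached_result so it runs at most once per test. A rough standard-library sketch of the same compute-once idea (hypothetical class; the real code also logs why the result was first requested, which this omits):

import functools

class LazyResult:
    # "load" would be a zero-argument closure around post.mortem(...).
    def __init__(self, load):
        self._load = load

    @functools.cached_property
    def result(self):
        # Computed on first access, cached on the instance afterwards.
        return self._load()
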
Example #6
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, include_ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                if not include_ignored:
                    continue

            # Filter out tests that have not been run
            result = None
            if not ignored and Print.result in args.print:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.saved_output_directory,
                    test_finished=None,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff)
                if skip.result(logger, args, result):
                    continue

                if not result:
                    result_stats.add_ignored(test, str(result))
                else:
                    result_stats.add_result(result)

            sep = ""

            # Print the test's name/path
            if not args.prefix:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory or test.directory),
                      end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix is Prefix.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif args.prefix is Prefix.test_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif args.prefix is Prefix.output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory),
                          end="")
                    sep = " "

            if ignored:
                print(sep, end="")
                print("ignored", ignored, end="")
                sep = " "

            for p in args.print:
                if p in [Print.diffs]:
                    continue
                elif p is Print.directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif p is Print.expected_result:
                    print(sep, end="")
                    print(test.expected_result, end="")
                    sep = " "
                elif p is Print.full_name:
                    print(sep, end="")
                    print(test.full_name, end="")
                    sep = " "
                elif p is Print.host_names:
                    for name in test.host_names:
                        print(sep, end="")
                        print(name, end="")
                        sep = ","
                    sep = " "
                elif p is Print.kind:
                    print(sep, end="")
                    print(test.kind, end="")
                    sep = " "
                elif p is Print.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif p is Print.output_directory:
                    print(sep, end="")
                    print(test.output_directory, end="")
                    sep = " "
                elif p is Print.result:
                    if result:
                        print(sep, end="")
                        if result.errors:
                            print(result, result.errors, end="")
                        else:
                            print(result, end="")
                        sep = " "
                elif p is Print.sanitize_directory:
                    print(sep, end="")
                    print(test.sanitize_directory, end="")
                    sep = " "
                elif p is Print.saved_output_directory:
                    print(sep, end="")
                    print(test.saved_output_directory, end="")
                    sep = " "
                elif p is Print.scripts:
                    for host, script in test.host_script_tuples:
                        print(sep, end="")
                        print("%s:%s" % (host, script), end="")
                        sep = ","
                    sep = " "
                else:
                    print()
                    print("unknown print option", p)

            print()

            if Print.diffs in args.print and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()
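
Most branches of the long if/elif chain over args.print in Examples 6 and 7 just map a Print member to a single test attribute. A table-driven sketch of that simple case (hypothetical helper; Print and the test attributes are assumed from the examples, and compound options such as host_names, scripts and result would still need their own branches):

# Hypothetical mapping from simple Print options to the field they print.
SIMPLE_FIELDS = {
    Print.directory: lambda test: test.directory,
    Print.expected_result: lambda test: test.expected_result,
    Print.full_name: lambda test: test.full_name,
    Print.kind: lambda test: test.kind,
    Print.name: lambda test: test.name,
    Print.output_directory: lambda test: test.output_directory,
    Print.sanitize_directory: lambda test: test.sanitize_directory,
    Print.saved_output_directory: lambda test: test.saved_output_directory,
}

def print_simple_field(test, option, sep):
    # Returns the separator to use before the next field, mirroring the
    # sep handling in the examples above.
    field = SIMPLE_FIELDS.get(option)
    if field is None:
        return sep
    print(sep, field(test), sep="", end="")
    return " "
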
Example #7
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, include_ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                if not include_ignored:
                    continue

            # Filter out tests that have not been run
            result = None
            if not ignored and Print.result in args.print:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     test_finished=None,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if skip.result(logger, args, result):
                    continue

                if not result:
                    result_stats.add_ignored(test, str(result))
                else:
                    result_stats.add_result(result)

            sep = ""

            # Print the test's name/path
            if not args.prefix:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix is Prefix.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif args.prefix is Prefix.test_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif args.prefix is Prefix.output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignored:
                print(sep, end="")
                print("ignored", ignored, end="")
                sep = " "

            for p in args.print:
                if p in [Print.diffs]:
                    continue
                elif p is Print.directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                elif p is Print.expected_result:
                    print(sep, end="")
                    print(test.expected_result, end="")
                    sep = " "
                elif p is Print.full_name:
                    print(sep, end="")
                    print(test.full_name, end="")
                    sep = " "
                elif p is Print.host_names:
                    for name in test.host_names:
                        print(sep, end="")
                        print(name, end="")
                        sep = ","
                    sep = " "
                elif p is Print.kind:
                    print(sep, end="")
                    print(test.kind, end="")
                    sep = " "
                elif p is Print.name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                elif p is Print.output_directory:
                    print(sep, end="")
                    print(test.output_directory, end="")
                    sep = " "
                elif p is Print.result:
                    if result:
                        print(sep, end="")
                        if result.errors:
                            print(result, result.errors, end="")
                        else:
                            print(result, end="")
                        sep = " "
                elif p is Print.sanitize_directory:
                    print(sep, end="")
                    print(test.sanitize_directory, end="")
                    sep = " "
                elif p is Print.saved_output_directory:
                    print(sep, end="")
                    print(test.saved_output_directory, end="")
                    sep = " "
                elif p is Print.scripts:
                    for host, script in test.host_script_tuples:
                        print(sep, end="")
                        print("%s:%s" % (host, script), end="")
                        sep = ","
                    sep = " "
                else:
                    print()
                    print("unknown print option", p)

            print()

            if Print.diffs in args.print and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()
Example #8
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details,
                        suffix)
            return

        # Be lazy when gathering the results, don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.

        old_result = post.mortem(test,
                                 args,
                                 domain_prefix=domain_prefix,
                                 quick=True,
                                 finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....", prefix,
                        test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s",
                               test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test,
                                                      domain_prefix,
                                                      boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time(
                    "running scripts %s",
                    " ".join(("%s:%s" % (host, script)) for host, script in
                             test.host_script_tuples)) as test_script_time:
                with tcpdump.Dump(logger,
                                  domain_prefix,
                                  test.output_directory, [
                                      test_domain.domain
                                      for test_domain in test_domains.values()
                                  ],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(
                                test.output_directory,
                                test_domain.domain.host_name +
                                ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s",
                                        script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error(
                                "**** timeout out while running script %s ****",
                                script)

                    finally:

                        # Close the redirected test-result log files
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            logfile.close()

                        # Always disconnect from the test domains.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

    # The test finished.  Aborts such as a failed boot, or a timeout,
    # will skip all the below.

    result = post.mortem(test,
                         args,
                         domain_prefix=domain_prefix,
                         finished=True,
                         update=True)

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.issues
                and " ", result.issues, suffix)

    # Since the test finished (resolved in POSIX terminology),
    # emit enough JSON to fool scripts like pluto-testlist-scan.sh.
    #
    # A test that timed out or crashed isn't considered resolved.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.

    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.status,
        jsonutil.result.result: result,
        jsonutil.result.issues: result.issues,
        jsonutil.result.hosts: test.host_names,
        jsonutil.result.time: jsonutil.ftime(test_total_time.start),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info,
                           header="updated test stats:",
                           prefix="  ")
    result_stats.log_summary(logger.info,
                             header="updated test results:",
                             prefix="  ")
Example #9
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.
                        saved_output_directory = os.path.join(test.output_directory,
                                                              "%s.%d" % (time.strftime("%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'", saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'", saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
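
The --retry help text in Example 9 encodes a small selection rule that the main loop then interleaves with statistics. A standalone sketch of just that rule (hypothetical predicate; "result" stands for the saved post-mortem result, None when the test never started):

def should_run(retry, result):
    # Restates the --retry help text: negative reruns everything, zero
    # runs only not-started tests, positive also reruns incomplete and
    # failing tests; with a non-negative count, passed tests are never
    # rerun.
    if retry < 0:
        return True
    if result is None:
        # Not started: no OUTPUT directory, so no result to inspect.
        return True
    if result.passed:
        return False
    return retry > 0
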
Example #10
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # always perform post mortem on the test directory.
        try:

            ignored, details = ignore.test(logger, args, test)
            if ignored:
                # If there is any pre-existing output move it to
                # backup.  Otherwise it looks like the test was run
                # when it wasn't (and besides, the output is no longer
                # applicable).
                #
                # The isdir() test followed by a simple move, while
                # racy, should be good enough.
                if os.path.isdir(test.output_directory):
                    logger.info("moving '%s' to '%s'", test.output_directory,
                                backup_directory)
                    os.makedirs(os.path.dirname(backup_directory),
                                exist_ok=True)
                    os.rename(test.output_directory, backup_directory)
                result_stats.add_ignored(test, ignored)
                test_stats.add(test, "ignored")
                logger.info("%s %s ignored (%s) %s", prefix, test_prefix,
                            details, suffix)
                return

            # Be lazy when gathering the results, don't run the
            # sanitizer or diff.  Let post.mortem figure out if the
            # test finished.

            old_result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     quick=True)
            if skip.result(logger, args, old_result):
                logger.info("%s %s skipped (previously %s) %s", prefix,
                            test_prefix, old_result, suffix)
                test_stats.add(test, "skipped")
                result_stats.add_skipped(old_result)
                return

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory; if it already exists,
            # move any contents to BACKUP/.  Do it file-by-file so
            # that, at no point, the OUTPUT/ directory is missing
            # (having an OUTPUT/ directory implies the test was
            # started).
            #
            # Don't create the path.  If the parent directory is
            # missing, this will fail.
            #
            # By backing up each test just before it is started, a
            # trail of what tests were attempted during each run is
            # created.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileNotFoundError:
                # Bail, something is messed up (for instance the parent directory doesn't exist).
                return
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        logger.exception("timeout while booting domains")
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        return

                # Run the scripts directly
                with logger.time(
                        "running scripts %s",
                        " ".join(("%s:%s" % (host, script)) for host, script in
                                 test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s",
                                        script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write(
                                        "\n*** exception running script %s ***\n%s"
                                        % (script, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(
                                    post.DONE)

                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout gets
                            # treated as unresolved.  Timeouts
                            # shouldn't occur so human intervention
                            # is required.
                            logger.error(
                                "**** timeout out while running test script %s ****",
                                script)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     update=True)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed out or crashed isn't considered
            # resolved, so the file isn't created.
            #
            # XXX: this should go away.

            if not os.path.isfile(test.result_file()) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                    jsonutil.result.total_time: round(test_runtime.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", test.result_file(),
                             j)
                with open(test.result_file(), "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Example #11
def main():

    parser = argparse.ArgumentParser(description="write 'table.json' to standard output")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))

    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    columns = [ "Test", "Expected", "Result", "Run time", "Responder", "..." ]
    rows = []

    for test in tests:

        sys.stderr.write("%s\n" % test.name)

        # Filter out tests that are being ignored?
        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored:
            if not include_ignored:
                continue

        # Filter out tests that have not been run
        result = None
        if not ignored:
            result = post.mortem(test, args, baseline=None,
                                 output_directory=test.saved_output_directory,
                                 test_finished=None,
                                 skip_sanitize=args.quick or args.quick_sanitize,
                                 skip_diff=args.quick or args.quick_diff,
                                 update=args.update,
                                 update_sanitize=args.update_sanitize,
                                 update_diff=args.update_diff)
            if skip.result(logger, args, result):
                continue

        row = [
            test.name,
            test.expected_result, str(result),
            "run-time"
        ]
        for host in sorted(test.host_names):
            errors = result.errors.errors
            if host in errors:
                row.append("%s %s" % (host, " ".join(sorted(errors[host]))))
            else:
                row.append("%s passed" % (host))

        rows.append(row)

    summary = {
        "Total": 0,
        "passed": 0,
        "failed": 0,
        "abort": 0,
        "missing baseline": 0,
        "missing console output": 0,
        "missing OUTPUT": 0,
        "missing RESULT": 0,
        "ASSERT": 0,
        "CORE": 0,
        "EXPECT": 0,
        "GPFAULT": 0,
        "SEGFAULT": 0,
        "date": "0000-00-00",
        "dir": "testing/pluto",
        "runtime": 0,
        "runtime_str": "00:00:00",
    }

    table = {
        "suffix": "/OUTPUT",
        "summary": summary,
        "columns": columns,
        "rows": rows,
        "runDir": "???",
    }
    print(json.dumps(table, indent=2))
    return 0
Example #12
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored and not include_ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            # No need to log all the ignored tests when an explicit
            # sub-set of tests is being run.  For instance, when running
            # just one test.
            if not args.test_name:
                logger.info("$s %s ignored (%s) %s", prefix, test_prefix,
                            details, suffix)
            return

        # Be lazy with gathering the results, don't run the sanitizer or
        # diff.
        #
        # XXX: There is a bug here where the only difference is white
        # space.  The test will show up as failed when it previously
        # showed up as a white-space pass.
        #
        # The presence of the RESULT file is a proxy for detecting that
        # the test was incomplete.

        old_result = post.mortem(test,
                                 args,
                                 test_finished=None,
                                 skip_diff=True,
                                 skip_sanitize=True)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....", prefix,
                        test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create the OUTPUT/ directory; if it already exists, move any
        # contents to BACKUP/.  Do it file-by-file so that, at no
        # point, is the OUTPUT/ directory missing (presence of OUTPUT/
        # implies the test was started).
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s",
                               test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test,
                                                      domain_prefix,
                                                      boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains",
                                     test.name)
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.
                    return

            # Run the scripts directly
            with logger.time(
                    "running scripts %s",
                    " ".join(("%s:%s" % (host, script)) for host, script in
                             test.host_script_tuples)) as test_script_time:
                try:

                    # re-direct the test-result log file
                    for test_domain in test_domains.values():
                        output = os.path.join(
                            test.output_directory,
                            test_domain.domain.host_name +
                            ".console.verbose.txt")
                        test_domain.console.output(open(output, "w"))

                    for host, script in test.host_script_tuples:
                        if args.stop_at == script:
                            logger.error(
                                "stopping test run at (before executing) script %s",
                                script)
                            break
                        test_domain = test_domains[host]
                        test_domain.read_file_run(script)
                    result = post.mortem(test,
                                         args,
                                         test_finished=True,
                                         update=True)

                except pexpect.TIMEOUT as e:
                    logger.exception(
                        "**** timeout out while running script %s ****",
                        script)
                    # Still perform post-mortem so that errors are
                    # captured, but force the result to
                    # incomplete.
                    result = post.mortem(test,
                                         args,
                                         test_finished=False,
                                         update=True)

                finally:

                    # Close the redirected test-result log files
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        logfile.close()

                    # Always disconnect from the test domains.
                    logger.info("closing all test domains")
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        if logfile:
                            logfile.close()
                        test_domain.close()

    # Above will have set RESULT.  Exceptions such as control-c or
    # a crash bypass this code.

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.errors
                and " ", result.errors, suffix)

    # Since the test finished, emit enough JSON to fool scripts like
    # pluto-testlist-scan.sh.
    #
    # This also leaves a simple marker to indicate that the test
    # finished.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.
    hosts = {}
    for host in sorted(test.host_names):
        if host in result.errors:
            hosts[host] = [error for error in result.errors[host]]
        else:
            hosts[host] = ["passed"]
    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.expected_result,
        jsonutil.result.result: str(result),
        jsonutil.result.time: jsonutil.ftime(datetime.now()),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        jsonutil.result.hosts: hosts,
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info,
                           header="updated test stats:",
                           prefix="  ")
    result_stats.log_summary(logger.info,
                             header="updated test results:",
                             prefix="  ")
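
The RESULT dictionary written at the end of the run is just a one-line JSON marker whose presence means the test finished.  Below is a hedged, standalone sketch of the same idea using assumed key names rather than the real jsonutil constants:

import json
import os
from datetime import datetime

def write_result_marker(output_directory, testname, result, runtime_seconds):
    # One JSON object on a single line; scripts only need to see that the
    # file exists and read the "result" field.
    record = {
        "testname": testname,
        "result": str(result),
        "time": datetime.now().isoformat(),
        "runtime": round(runtime_seconds, 1),
    }
    path = os.path.join(output_directory, "RESULT")
    with open(path, "w") as f:
        json.dump(record, f)
        f.write("\n")
    return path
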
Example no. 13
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a python word.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        "number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests"
    )
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument(
        "--output-directory",
        default=None,
        metavar="DIRECTORY",
        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT"
    )
    parser.add_argument(
        "directories",
        metavar="DIRECTORY",
        nargs="+",
        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory
                or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(
        logger,
        args.directories,
        args,
        testsuite_output_directory=args.output_directory,
        log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s",
                     args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a testsuite),
    # then force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count,
                                                         len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)",
                                    test_prefix, result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.
                        saved_output_directory = os.path.join(
                            test.output_directory, "%s.%d" % (time.strftime(
                                "%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'",
                                     saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory,
                                            exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, attempt + 1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'",
                                    saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
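
The per-attempt archive directory used above is named OUTPUT/YYYYMMDDHHMMSS.ATTEMPT, where the timestamp is the run's start time so every attempt from one run can be matched with a single wildcard.  A small illustrative sketch of just that naming (the paths are assumptions):

import os
import time

def saved_output_path(output_directory, start_time, attempt):
    # start_time is a time.struct_time captured once at the start of the run.
    stamp = time.strftime("%Y%m%d%H%M%S", start_time)
    return os.path.join(output_directory, "%s.%d" % (stamp, attempt))

print(saved_output_path("testing/pluto/basic-pluto-01/OUTPUT",
                        time.localtime(), 1))
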
Example no. 14
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignore(test, ignore)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run
            result = None
            if not ignore and args.print_result:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            result_stats.add_result(result)

            sep = ""

            # Print the test's name/path
            if not args.prefix_directory and not args.prefix_name and not args.prefix_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.prefix_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.prefix_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                if result.errors:
                    print(result, result.errors, end="")
                else:
                    print(result, end="")
                sep = " "

            if args.print_domains:
                for name in test.domain_names():
                    print(sep, end="")
                    print(name, end="")
                    sep = " "

            if args.print_initiators:
                for name in test.initiator_names():
                    print(sep, end="")
                    print(name, end="")
                    sep = " "

            if args.print_scripts:
                for scripts in test.scripts():
                    for name, script in scripts.items():
                        print(sep, end="")
                        print(name + ":" + script, end="")
                        sep = ","
                    sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)
Example no. 15
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, any test result that differs from the baseline is also identified.")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    # parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-output-directory", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+",
                        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using v as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.print_output_directory = args.print_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Is the last argument some sort of baseline?  If it is, pre-load
    # it.
    #
    # XXX: Should also support something like --baseline-testsuite and
    # --baseline-output parameters.
    baseline = None
    if len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args.directories[-1], args,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            # Print the test's name/path
            if not args.print_directory and not args.print_name and not args.print_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.print_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.print_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.print_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                if result.errors:
                    print(result, result.errors, end="")
                else:
                    print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
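
Each repetition of -v above switches on one more group of print/list options.  A minimal sketch of that cascade, with illustrative flag names and one flag per verbosity level:

def apply_verbosity(verbose, flags):
    # flags is an ordered list of (name, explicitly_requested); each extra
    # -v enables the next entry unless it was already requested explicitly.
    enabled = {}
    for level, (name, explicit) in enumerate(flags):
        enabled[name] = explicit or verbose > level
    return enabled

print(apply_verbosity(2, [("print_directory", False),
                          ("print_output_directory", False),
                          ("list_untested", False),
                          ("list_ignored", False)]))
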
Example no. 16
def main():

    parser = argparse.ArgumentParser(
        description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).",
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick", action="store_true", help=("Use the previously generated '.console.txt' and '.console.diff' files")
    )
    parser.add_argument(
        "--quick-sanitize", action="store_true", help=("Use the previously generated '.console.txt' file")
    )
    parser.add_argument("--quick-diff", action="store_true", help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true", help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true", help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")

    parser.add_argument("--list-ignored", action="store_true", help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true", help="include untested tests in the list")

    parser.add_argument(
        "directories", metavar="TEST-DIRECTORY", nargs="+", help=("Either a testsuite (only one) or test directory")
    )
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument(
        "baseline",
        metavar="BASELINE-DIRECTORY",
        nargs="?",
        help=("An optional testsuite directory containing" " results from a previous test run"),
    )
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using v as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?  Suppress any
        # nasty errors.
        basetests = testsuite.load(logger, args.directories[-1], error_level=logutil.DEBUG)
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST.
    # Also, passing the full baseline to Test.results() lets that
    # function differentiate between a baseline missing results or
    # being entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.old_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff,
                )
                if not result and not args.list_untested:
                    continue

            sep = ""

            if args.print_name:
                print(sep, end="")
                print(test.name, end="")
                sep = " "

            if args.print_directory:
                print(sep, end="")
                print(test.directory, end="")
                sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
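
Pre-loading the baseline into a dictionary keyed by test name, as done above, makes every later lookup constant-time and lets the code distinguish "this test has no baseline result" from "no baseline was supplied at all".  A tiny sketch with a stand-in Test type:

from collections import namedtuple

Test = namedtuple("Test", "name directory")

def index_baseline(basetests):
    # None means no baseline was supplied; an empty dict would mean a
    # baseline was supplied but contains no tests.
    if basetests is None:
        return None
    return {test.name: test for test in basetests}

baseline = index_baseline([Test("basic-pluto-01",
                                "testing/pluto/basic-pluto-01")])
print(baseline["basic-pluto-01"].directory)
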
Example no. 17
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Be lazy when gathering the results, don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.

        old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                 quick=True, finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                  [test_domain.domain for test_domain in test_domains.values()],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(test.output_directory,
                                                  test_domain.domain.host_name + ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error("**** timeout out while running script %s ****", script)

                    finally:

                        # Close the redirected test-result log files
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            logfile.close()

                        # Always disconnect from the test domains.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

    # The test finished.  Aborts such as a failed boot, or a timeout,
    # will skip all the below.

    result = post.mortem(test, args, domain_prefix=domain_prefix,
                         finished=True, update=True)

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                result.issues and " ", result.issues, suffix)

    # Since the test finished (resolved in POSIX terminology),
    # emit enough JSON to fool scripts like pluto-testlist-scan.sh.
    #
    # A test that timed-out or crashed, isn't considered resolved.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.

    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.status,
        jsonutil.result.result: result,
        jsonutil.result.issues: result.issues,
        jsonutil.result.hosts: test.host_names,
        jsonutil.result.time: jsonutil.ftime(test_total_time.start),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
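
The OUTPUT/ handling above never lets the directory disappear: it is created if missing, and otherwise its contents are moved aside entry by entry.  A standalone sketch of that pattern (the directory names are illustrative):

import os

def prepare_output_directory(output_directory, backup_directory):
    try:
        os.mkdir(output_directory)
    except FileExistsError:
        # Move the contents aside one entry at a time so OUTPUT/ itself
        # always exists (its presence marks the test as started).
        os.makedirs(backup_directory, exist_ok=True)
        for name in os.listdir(output_directory):
            os.replace(os.path.join(output_directory, name),
                       os.path.join(backup_directory, name))
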
Example no. 18
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a python word.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a testsuite),
    # then force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # On first attempt (attempt == 0), empty the
                # <test>/OUTPUT/ directory of all contents.  On
                # subsequent attempts, move the files from the
                # previous attempt to <test>/OUTPUT/<attempt>/.
                #
                # XXX: Don't just delete the OUTPUT/ directory as
                # this, for a short period, changes the status of the
                # test to never-run.
                #
                # XXX: During boot, swan-transmogrify runs "chcon -R
                # testing/pluto".  Of course this means that each time
                # a test is added and/or a test is run (adding files
                # under <test>/OUTPUT), the boot process (and
                # consequently the time taken to run a test) keeps
                # increasing.
                #
                # Mitigate this slightly by emptying <test>/OUTPUT
                # before starting any test attempts.  It's assumed
                # that the previous test run was already captured
                # above with save-directory.

                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("emptying directory '%s'", test.output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if attempt == 0:
                                logger.debug("  remove '%s'", src)
                                if os.path.isfile(src):
                                    os.remove(src)
                                else:
                                    shutil.rmtree(src)
                            elif os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("  move '%s' to '%s'", src, dst)
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
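
The --retry handling above reduces to three cases: a negative count re-runs everything, zero only selects tests that never started, and a positive count also re-runs incomplete and failing tests.  A hedged sketch, with a plain string standing in for post.mortem's richer result object:

def should_run(retry, previous_result):
    if retry < 0:
        return True               # negative: always re-run
    if previous_result is None:
        return True               # not started: no OUTPUT directory
    if previous_result == "passed":
        return False              # passing tests are never re-run here
    return retry > 0              # incomplete/failed only when retrying

assert should_run(-1, "passed")
assert should_run(0, None)
assert not should_run(0, "failed")
assert should_run(1, "failed")
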
Example no. 19
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored and not include_ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            # No need to log all the ignored tests when an explicit
            # sub-set of tests is being run.  For instance, when running
            # just one test.
            if not args.test_name:
                logger.info("$s %s ignored (%s) %s",
                            prefix, test_prefix, details, suffix)
            return

        # Be lazy with gathering the results, don't run the sanitizer or
        # diff.
        #
        # XXX: There is a bug here where the only difference is white
        # space.  The test will show up as failed when it previously
        # showed up as a white-space pass.
        #
        # The presence of the RESULT file is a proxy for detecting that
        # the test was incomplete.

        old_result = post.mortem(test, args, test_finished=None,
                                 skip_diff=True, skip_sanitize=True)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create the OUTPUT/ directory; if it already exists, move any
        # contents to BACKUP/.  Do it file-by-file so that, at no
        # point, is the OUTPUT/ directory missing (presence of OUTPUT/
        # implies the test was started).
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/ this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, copy it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains", test.name)
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.
                    return


            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                try:

                    # re-direct the test-result log file
                    for test_domain in test_domains.values():
                        output = os.path.join(test.output_directory,
                                              test_domain.domain.host_name + ".console.verbose.txt")
                        test_domain.console.output(open(output, "w"))

                    for host, script in test.host_script_tuples:
                        if args.stop_at == script:
                            logger.error("stopping test run at (before executing) script %s", script)
                            break
                        test_domain = test_domains[host]
                        test_domain.read_file_run(script)
                    result = post.mortem(test, args, test_finished=True, update=True)

                except pexpect.TIMEOUT as e:
                    logger.exception("**** timeout out while running script %s ****", script)
                    # Still perform post-mortem so that errors are
                    # captured, but force the result to
                    # incomplete.
                    result = post.mortem(test, args, test_finished=False, update=True)

                finally:

                    # Close the redirected test-result log files and
                    # always disconnect from the test domains.
                    logger.info("closing all test domains")
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        if logfile:
                            logfile.close()
                        test_domain.close()

    # Above will have set RESULT.  Exceptions such as control-c or
    # a crash bypass this code.

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                result.errors and " ", result.errors, suffix)

    # Since the test finished, emit enough JSON to fool scripts like
    # pluto-testlist-scan.sh.
    #
    # This also leaves a simple marker to indicate that the test
    # finished.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.
    hosts = {}
    for host in sorted(test.host_names):
        if host in result.errors:
            hosts[host] = [error for error in result.errors[host]]
        else:
            hosts[host] = ["passed"]
    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.expected_result,
        jsonutil.result.result: str(result),
        jsonutil.result.time: jsonutil.ftime(datetime.now()),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        jsonutil.result.hosts: hosts,
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
Ejemplo n.º 20
0
def main():

    parser = argparse.ArgumentParser(
        description=
        "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately)."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))
    parser.add_argument(
        "--quick-sanitize",
        action="store_true",
        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument(
        "--quick-diff",
        action="store_true",
        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize",
                        action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff",
                        action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")

    parser.add_argument("--list-ignored",
                        action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested",
                        action="store_true",
                        help="include untested tests in the list")

    parser.add_argument(
        "directories",
        metavar="TEST-DIRECTORY",
        nargs="+",
        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline",
                        metavar="BASELINE-DIRECTORY",
                        nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a shortcut for these; make
    # re-ordering easy by using v as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?  Suppress any
        # nasty errors.
        basetests = testsuite.load(logger,
                                   args.directories[-1],
                                   error_level=logutil.DEBUG)
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST.
    # Also, passing the full baseline to Test.results() lets that
    # function differentiate between a baseline missing results or
    # being entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.old_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            if args.print_name:
                print(sep, end="")
                print(test.name, end="")
                sep = " "

            if args.print_directory:
                print(sep, end="")
                print(test.directory, end="")
                sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
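A hedged sketch of the baseline pre-load used above: indexing the baseline testsuite by test name makes later lookups cheap and lets a caller tell a missing test apart from a missing baseline.  The index_baseline name is illustrative.

def index_baseline(basetests):
    # basetests is assumed to be an iterable of objects with a .name
    # attribute (as returned by testsuite.load()); None means no baseline.
    if not basetests:
        return None
    return {test.name: test for test in basetests}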
Ejemplo n.º 21
0
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY",
                        default=os.path.join("BACKUP", time.strftime("%Y%m%d%H%M%S", time.localtime())),
                        help="backup existing <test>/OUTPUT to %(metavar)s/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignore(test, ignore)
                test_stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    old_result, test.output_directory)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    test_stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            test_stats.add("tests", test)

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory, test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add("attempts", test)

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add("attempts(%s:%s)" % (ending, result), test)
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add("tests(%s)" % result, test)
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated stats:",
                                   prefix="    ")
            result_stats.log_summary(logger.info, header="updated results:",
                                     prefix="    ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    level = args.verbose and logger.info or logger.debug
    test_stats.log_details(level, header="stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="result details:", prefix="  ")

    test_stats.log_summary(logger.info, header="stat summary:", prefix="  ")
    result_stats.log_summary(logger.info, header="result summary:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return 0
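The "--retry" semantics described in the comments above reduce to a small decision function.  A sketch, assuming old_result is the lazily gathered previous result (or None) and retry is the --retry value:

def should_run(retry, old_result):
    if retry < 0:
        return True   # -1: rerun not-started, failed and passed tests
    if old_result is None:
        return True   # not started yet, always run
    if old_result.passed:
        return False  # keep passing results when retry >= 0
    if retry == 0:
        return False  # 0: only not-started tests
    return True       # 1: retry failed or incomplete tests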
Ejemplo n.º 22
0
def main():

    parser = argparse.ArgumentParser(
        description=
        "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))
    parser.add_argument(
        "--quick-sanitize",
        action="store_true",
        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument(
        "--quick-diff",
        action="store_true",
        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize",
                        action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff",
                        action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    # parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-output-directory", action="store_true")

    parser.add_argument("--list-ignored",
                        action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested",
                        action="store_true",
                        help="include untested tests in the list")

    parser.add_argument(
        "directories",
        metavar="TEST-DIRECTORY",
        nargs="+",
        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline",
                        metavar="BASELINE-DIRECTORY",
                        nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a shortcut for these; make
    # re-ordering easy by using v as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.print_output_directory = args.print_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Is the last argument some sort of baseline?  If it is, pre-load
    # it.
    #
    # XXX: Should also support something like --baseline-testsuite and
    # --baseline-output parameters.
    baseline = None
    if len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger,
                                  args.directories[-1],
                                  args,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.saved_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            # Print the test's name/path
            if not args.print_directory and not args.print_name and not args.print_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory or test.directory),
                      end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.print_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.print_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.print_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory),
                          end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            print(sep, end="")
            if result.errors:
                print(result, result.errors, end="")
            else:
                print(result, end="")
            sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
Ejemplo n.º 23
0
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY", default="BACKUP",
                        help="backup existing <test>/OUTPUT to %(metavar)s/<date>/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()
    exit_code = 0

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add(test, "total")
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore, details = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignored(test, ignore)
                test_stats.add(test, "ignored")
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, details)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    old_result, test.output_directory)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    test_stats.add(test, "retry")

            logger.info("%s: starting ...", test_prefix)
            test_stats.add(test, "tests")

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory,
                                                      start_time.strftime("%Y%m%d%H%M%S"),
                                                      test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add(test, "attempts")

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, args)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        logger.exception("**** test %s timed out ****", test.name)
                        ending = "timed-out"
                        # If the test has no output to check against, this will "pass"
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add(test, "attempts", ending, str(result))
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return exit_code
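The example above names its backup tree BACKUP/&lt;date&gt;/&lt;test&gt;, with the date taken once from the run's start time so every test of a run lands under the same snapshot.  A small sketch of that layout with illustrative values:

import os
from datetime import datetime

def backup_path(backup_root, start_time, test_name):
    # BACKUP/<date>/<test>; start_time is captured once per run.
    return os.path.join(backup_root,
                        start_time.strftime("%Y%m%d%H%M%S"),
                        test_name)

# e.g. backup_path("BACKUP", datetime(2024, 1, 2, 3, 4, 5), "basic-pluto-01")
# -> "BACKUP/20240102030405/basic-pluto-01"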
Ejemplo n.º 24
0
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore, details = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignored(test, ignore)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run
            result = None
            if not ignore and args.print_result:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

                if not result:
                    result_stats.add_ignored(test, str(result))
                else:
                    result_stats.add_result(result)

            sep = ""

            # Print the test's name/path
            if not args.prefix_directory and not args.prefix_name and not args.prefix_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.prefix_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.prefix_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.prefix_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                if result.errors:
                    print(result, result.errors, end="")
                else:
                    print(result, end="")
                sep = " "

            if args.print_domains:
                for name in test.domain_names():
                    print(sep, end="")
                    print(name, end="")
                    sep = " "

            if args.print_initiators:
                for name in test.initiator_names():
                    print(sep, end="")
                    print(name, end="")
                    sep = " "

            if args.print_scripts:
                for scripts in test.scripts():
                    for name, script in scripts.items():
                        print(sep, end="")
                        print(name + ":" + script, end="")
                        sep = ","
                    sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)
Ejemplo n.º 25
0
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    suffix = "******"
    test_stats.add(test, "total")

    # Would the number of tests to be [re]run be better?
    test_lapsed_time = timing.Lapsed()
    test_prefix = "%s %s (test %d of %d)" % (suffix, test.name, test_count, tests_count)

    ignored, include_ignored, details = ignore.test(logger, args, test)
    if ignored and not include_ignored:
        result_stats.add_ignored(test, ignored)
        test_stats.add(test, "ignored")
        # No need to log all the ignored tests when an explicit
        # sub-set of tests is being run.  For instance, when running
        # just one test.
        if not args.test_name:
            logger.info("%s ignored (%s)", test_prefix, details)
        return

    # Be lazy with gathering the results, don't run the sanitizer or
    # diff.
    #
    # XXX: There is a bug here where the only difference is white
    # space.  The test will show up as failed when it previously showed
    # up as a whitespace pass.
    #
    # The presence of the RESULT file is a proxy for detecting that
    # the test was incomplete.

    old_result = post.mortem(test, args, test_finished=None,
                             skip_diff=True, skip_sanitize=True)
    if skip.result(logger, args, old_result):
        logger.info("%s skipped (previously %s)", test_prefix, old_result)
        test_stats.add(test, "skipped")
        result_stats.add_skipped(old_result)
        return

    if old_result:
        test_stats.add(test, "tests", "retry")
        logger.info("%s started (previously %s) ....", test_prefix, old_result)
    else:
        test_stats.add(test, "tests", "try")
        logger.info("%s started ....", test_prefix)
    test_stats.add(test, "tests")

    # Move the contents of the existing OUTPUT directory to
    # BACKUP_DIRECTORY.  Do it file-by-file so that, at no point, the
    # directory is empty.
    #
    # By moving each test just before it is started a trail of what
    # tests were attempted at each run is left.
    #
    # XXX: During boot, swan-transmogrify runs "chcon -R
    # testing/pluto".  Of course this means that each time a test is
    # added and/or a test is run (adding files under <test>/OUTPUT),
    # the boot process (and consequently the time taken to run a test)
    # keeps increasing.
    #
    # Always moving the directory contents to the BACKUP_DIRECTORY
    # mitigates this some.

    backup_directory = None
    if os.path.exists(test.output_directory):
        backup_directory = os.path.join(args.backup_directory, test.name)
        logger.info("moving contents of '%s' to '%s'",
                    test.output_directory, backup_directory)
        # Copy "empty" OUTPUT directories too.
        args.dry_run or os.makedirs(backup_directory, exist_ok=True)
        for name in os.listdir(test.output_directory):
            src = os.path.join(test.output_directory, name)
            dst = os.path.join(backup_directory, name)
            logger.debug("moving '%s' to '%s'", src, dst)
            args.dry_run or os.replace(src, dst)

    debugfile = None
    result = None

    # At least one iteration; above will have filtered out skips and
    # ignored

    for attempt in range(args.attempts):
        test_stats.add(test, "attempts")

        # Create the OUTPUT directory.
        try:
            if not args.dry_run:
                os.mkdir(test.output_directory)
            elif os.path.exists(test.output_directory):
                raise FileExistsError()
        except FileExistsError:
            # On first attempt, the OUTPUT directory will be empty
            # (see above) so no need to save.
            if attempt > 0:
                backup_directory = os.path.join(test.output_directory, str(attempt))
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                args.dry_run or os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    if os.path.isfile(src):
                        dst = os.path.join(backup_directory, name)
                        logger.debug("moving '%s' to '%s'", src, dst)
                        args.dry_run or os.replace(src, dst)

        # Start a debug log in the OUTPUT directory; include timing
        # for this specific test attempt.
        with logger.timer_stack(), logger.debug_stack(test.output_directory, "debug.log"):
            attempt_lapsed_time = timing.Lapsed()
            attempt_prefix = "%s (attempt %d of %d)" % (test_prefix, attempt+1, args.attempts)
            logger.info("%s started ....", attempt_prefix)

            if backup_directory:
                logger.info("contents of '%s' moved to '%s'",
                            test.output_directory, backup_directory)
            backup_directory = None

            ending = "undefined"
            try:
                if not args.dry_run:
                    _run_test(domain_prefix, test, args, boot_executor)
                ending = "finished"
                result = post.mortem(test, args, test_finished=True,
                                     update=(not args.dry_run))
                if not args.dry_run:
                    # Store enough to fool the script
                    # pluto-testlist-scan.sh and leave a marker to
                    # indicate that the test finished.
                    logger.info("storing result in '%s'", test.result_file())
                    with open(test.result_file(), "w") as f:
                        f.write('"result": "%s"\n' % result)
            except pexpect.TIMEOUT as e:
                logger.exception("**** test %s timed out ****", test.name)
                ending = "timed-out"
                # Still perform post-mortem so that errors are
                # captured, but force the result to incomplete.
                result = post.mortem(test, args, test_finished=False,
                                     update=(not args.dry_run))

            # Since the OUTPUT directory exists, all paths to here
            # should have a non-null RESULT.
            test_stats.add(test, "attempts", ending, str(result))
            logger.info("%s %s%s%s after %s %s", attempt_prefix, result,
                        result.errors and " ", result.errors,
                        attempt_lapsed_time, suffix)
            if result.passed:
                break

    # Above will have set RESULT.  During a control-c or crash the
    # below will not be executed.

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)

    logger.info("%s %s%s%s after %s %s", test_prefix, result,
                result.errors and " ", result.errors,
                test_lapsed_time, suffix)

    test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
Ejemplo n.º 26
0
def results(logger, tests, baseline, args, result_stats):

    failures = 0
    unresolved = 0
    passed = 0
    nr = 0

    for test in tests:

        nr = nr + 1
        publish.json_status(logger, args,
                            "rebuilding %s (test %d of %d)" % (test.name, nr, len(tests)))

        # If debug logging is enabled this will provide fine grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if printer.Print.RESULT in args.print \
                and skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            if result.resolution in [post.Resolution.PASSED,
                                     post.Resolution.UNTESTED,
                                     post.Resolution.UNSUPPORTED]:
                passed = passed + 1
            elif result.resolution in [post.Resolution.UNRESOLVED]:
                unresolved = unresolved + 1
            else:
                failures = failures + 1

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and test in baseline \
               and not result.issues.crashed():
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    publish.json_status(logger, args, "finished")

    # exit code
    if args.exit_ok:
        return 0
    elif unresolved:
        return 125 # 'git bisect' magic for don't know
    elif failures:
        return 1
    else:
        return 0
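
The exit-code block at the end of this example follows the git bisect run convention, where 125 tells bisect that the revision cannot be tested and should be skipped. Pulled out on its own (a sketch, not code from the project), the mapping is:

def exit_code(failures, unresolved, exit_ok=False):
    """Map run totals onto a 'git bisect run' friendly exit status."""
    if exit_ok:
        return 0      # caller asked for success regardless of results
    if unresolved:
        return 125    # "skip this revision" in git bisect terms
    if failures:
        return 1      # ordinary failure
    return 0
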
Example No. 27
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run, remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details,
                        suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test,
                                 args,
                                 domain_prefix=domain_prefix,
                                 quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run, so a post-mortem needs to
        # be performed.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, copy the contents to
            # BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty, like deleting the parent
            # directory.
            #
            # Backing up each test just before it is started leaves a
            # trail of what tests were attempted during a test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/, this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("TIMEOUT while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("EOF while booting domains")
                        return
                    except:
                        logger.exception("EXCEPTION while booting domains")
                        raise

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 test.host_scripts) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.redirect_output(
                                    open(output, "w"))

                            # If a script times out, don't try to run
                            # post-mortem.sh.
                            host_timed_out = None

                            for script in test.host_scripts:
                                test_domain = test_domains[script.host_name]
                                try:
                                    test_domain.read_file_run(script.path)
                                except pexpect.TIMEOUT as e:
                                    # A timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    message = "%s while running script %s" % (
                                        post.Issues.TIMEOUT, script)
                                    logger.warning("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except pexpect.EOF as e:
                                    # An EOF while a script is running
                                    # is a sign that libvirt crashed.
                                    message = "%s while running script %s" % (
                                        post.Issues.EOF, script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    message = "%s %s while running script %s" % (
                                        post.Issues.EXCEPTION, str(e), script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "\n%s %s %s\n", post.LHS, message,
                                        post.RHS)
                                    raise

                            if args.run_post_mortem is False:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- disabled +++"
                                )
                            elif host_timed_out:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- %s timed out +++"
                                    % (host_timed_out))
                            else:  # None or True
                                script = "../bin/post-mortem.sh"
                                for host_name in test.host_names:
                                    test_domain = test_domains[host_name]
                                    test_domain.console.append_output(
                                        "%s post-mortem %s", post.LHS,
                                        post.LHS)
                                    logger.info("running %s on %s", script,
                                                host_name)
                                    try:
                                        status = test_domain.console.run(
                                            script)
                                        if status:
                                            logger.error(
                                                "%s failed on %s with status %s",
                                                script, host_name, status)
                                        else:
                                            test_domain.console.append_output(
                                                "%s post-mortem %s", post.RHS,
                                                post.RHS)
                                    except pexpect.TIMEOUT as e:
                                        # A post-mortem ending with a
                                        # TIMEOUT gets treated as a
                                        # FAIL.
                                        message = "%s while running script %s" % (
                                            post.Issues.TIMEOUT, script)
                                        logger.warning("*** %s ***" % message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except pexpect.EOF as e:
                                        # A post-mortem ending with an
                                        # EOF gets treated as
                                        # unresolved.
                                        message = "%s while running script %s" % (
                                            post.Issues.EOF, script)
                                        logger.exception("*** %s ***" %
                                                         message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except BaseException as e:
                                        # if there is an exception, write
                                        # it to the console
                                        message = "%s %s while running script %s" % (
                                            post.Issues.EXCEPTION, str(e),
                                            script)
                                        logger.exception(message)
                                        test_domain.console.append_output(
                                            "\n%s %s %s\n", post.LHS, message,
                                            post.RHS)
                                        raise

                            for test_domain in test_domains.values():
                                test_domain.console.append_output(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                test_domain.console.close_output()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed out or crashed isn't considered
            # resolved, so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname:
                    test.name,
                    jsonutil.result.expect:
                    test.status,
                    jsonutil.result.result:
                    result,
                    jsonutil.result.issues:
                    result.issues,
                    jsonutil.result.hosts:
                    test.host_names,
                    jsonutil.result.time:
                    jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime:
                    round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time:
                    round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time:
                    round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Example No. 28
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help=("number of times a test should be attempted before giving up"
                              " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
                              "; a negative %(metavar)s selects all tests"
                              "; a zero %(metavar)s selects not-started tests"
                              "; a positive %(metavar)s selects not-started, incomplete and failing tests"
                              "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directory)
        return 1

    # If a list of test directories was specified (i.e., not a testsuite),
    # then force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)", test.name,
                                    result.value, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        stat = os.stat(test.output_directory)
                        mtime = time.localtime(stat.st_mtime)
                        saved_output_directory = (test.output_directory
                                                + time.strftime(".%Y%m%d%H%M", start_time)
                                                + time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory, saved_output_directory)
                        os.rename(test.output_directory, saved_output_directory)
                        # if the second attempt fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, run+1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # control-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
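
The FileExistsError branch in this example instead renames the whole OUTPUT/ directory, building the new name from the run's start time plus the directory's mtime (a good approximation of when the previous run finished). As a small standalone sketch, assuming the same naming scheme:

import os
import time

def saved_output_name(output_directory, start_time):
    """Build OUTPUT.<run start>.<last modified> for a previous test run."""
    mtime = time.localtime(os.stat(output_directory).st_mtime)
    return (output_directory
            + time.strftime(".%Y%m%d%H%M", start_time)
            + time.strftime(".%H%M%S", mtime))
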
Example No. 29
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        ("number of times a test should be attempted before giving up"
         " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
         "; a negative %(metavar)s selects all tests"
         "; a zero %(metavar)s selects not-started tests"
         "; a positive %(metavar)s selects not-started, incomplete and failing tests"
         "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories",
                        metavar="DIRECTORY",
                        nargs="*",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger,
                                              args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directory)
        return 1

    # If a list of test directories was specified (i.e., not a testsuite),
    # then force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)",
                                    test.name, result.value,
                                    test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        stat = os.stat(test.output_directory)
                        mtime = time.localtime(stat.st_mtime)
                        saved_output_directory = (
                            test.output_directory +
                            time.strftime(".%Y%m%d%H%M", start_time) +
                            time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory,
                                     saved_output_directory)
                        os.rename(test.output_directory,
                                  saved_output_directory)
                        # if the second attempt fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, run + 1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # control-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
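
The --retry handling in both variants of main() boils down to one decision per test: a negative count reruns everything, zero only runs tests with no prior result, and a positive count also reruns failed or incomplete tests. A hedged sketch of that decision, assuming the previous result exposes a boolean passed attribute as in the examples:

def should_run(retry, previous_result):
    """Decide whether a test is (re)run under the --retry semantics above."""
    if retry < 0:
        return True                  # negative: always rerun
    if previous_result is None:
        return True                  # not started: always run
    if previous_result.passed:
        return False                 # skip tests that already pass
    return retry > 0                 # positive: retry failed/incomplete tests
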
Example No. 30
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run, remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run, so a post-mortem needs to
        # be performed.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....",
                            prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, copy the contents to
            # BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty, like deleting the parent
            # directory.
            #
            # Backing up each test just before it is started leaves a
            # trail of what tests were attempted during a test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/, this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(test.output_directory,
                                                        "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("timeout while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("eof (disconnect) while booting domains")
                        return

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 " ".join(("%s:%s" % (host, script))
                                          for host, script in test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                      [test_domain.domain for test_domain in test_domains.values()],
                                      enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(test.output_directory,
                                                      test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except pexpect.TIMEOUT as e:
                                    # A test ending with a timeout
                                    # gets treated as a FAIL.  A
                                    # timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    post_timeout = "%s %s:%s" % (post.TIMEOUT, host, script)
                                    logger.warning("*** %s ***" % post_timeout)
                                    test_domain.console.child.logfile.write("%s %s %s" % (post.LHS, post_timeout, post.RHS))
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write("\n%s %s %s:%s %rhs\n%s" % (post.LHS, post.EXCEPTION, host, script, post.RHS, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info("closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed out or crashed isn't considered
            # resolved, so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
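
Both _process_test() variants finish by writing a RESULT file so that scripts such as pluto-testlist-scan.sh have something to scan. Assuming the file holds a single JSON object, as the jsonutil.dumps() call above suggests, a minimal reader might be:

import json
import os

def read_result(output_directory):
    """Return the RESULT dictionary for a finished test, or None."""
    result_file = os.path.join(output_directory, "RESULT")
    if not os.path.isfile(result_file):
        return None    # unresolved or crashed runs never write RESULT
    with open(result_file) as f:
        return json.load(f)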