Example 1
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY", default="BACKUP",
                        help="backup existing <test>/OUTPUT to %(metavar)s/<date>/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()
    exit_code = 0

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add(test, "total")
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore, details = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignored(test, ignore)
                test_stats.add(test, "ignored")
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, details)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    old_result, test.output_directory)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    test_stats.add(test, "retry")

            logger.info("%s: starting ...", test_prefix)
            test_stats.add(test, "tests")

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory,
                                                      start_time.strftime("%Y%m%d%H%M%S"),
                                                      test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add(test, "attempts")

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, args)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        logger.exception("**** test %s timed out ****", test.name)
                        ending = "timed-out"
                        # If the test has no output to check against, this will "pass"
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add(test, "attempts", ending, str(result))
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return exit_code
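
Example 1 guards its side effects with short-circuit expressions such as
"args.dry_run or os.makedirs(...)". A minimal, standalone sketch of that
idiom (the flag and directory name are illustrative, not taken from the
example):

import os

dry_run = True  # stands in for args.dry_run

# "a or b" evaluates b only when a is falsy, so the makedirs call is
# skipped entirely during a dry run.
dry_run or os.makedirs("demo-output", exist_ok=True)

# The equivalent, more conventional spelling:
if not dry_run:
    os.makedirs("demo-output", exist_ok=True)
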
Example 2
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        ("number of times a test should be attempted before giving up"
         " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
         "; a negative %(metavar)s selects all tests"
         "; a zero %(metavar)s selects not-started tests"
         "; a positive %(metavar)s selects not-started, incomplete and failing tests"
         "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories",
                        metavar="DIRECTORY",
                        nargs="*",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger,
                                              args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directory)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)",
                                    test.name, result.value,
                                    test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        mtime = time.localtime(
                            os.stat(test.output_directory).st_mtime)
                        saved_output_directory = (
                            test.output_directory +
                            time.strftime(".%Y%m%d%H%M", start_time) +
                            time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory,
                                     saved_output_directory)
                        os.rename(test.output_directory,
                                  saved_output_directory)
                        # if this second mkdir fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, run + 1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # control-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
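
Example 2 renames an existing OUTPUT directory aside instead of deleting
it. A minimal sketch of that rename scheme (the directory name is
illustrative; the suffix logic mirrors the example):

import os
import time

output = "OUTPUT"              # hypothetical test output directory
start_time = time.localtime()  # when this run started

if os.path.isdir(output):
    # The suffix combines this run's start time (so everything saved by
    # one run matches the wild card OUTPUT.<start>.*) with the
    # directory's mtime (a fair approximation of when the previous run
    # finished).
    mtime = time.localtime(os.stat(output).st_mtime)
    saved = (output
             + time.strftime(".%Y%m%d%H%M", start_time)
             + time.strftime(".%H%M%S", mtime))
    os.rename(output, saved)
os.mkdir(output)
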
Example 3
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY",
                        default=os.path.join("BACKUP", time.strftime("%Y%m%d%H%M%S", time.localtime())),
                        help="backup existing <test>/OUTPUT to %(metavar)s/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignore(test, ignore)
                test_stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    old_result, test.output_directory)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    test_stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            test_stats.add("tests", test)

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory, test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add("attempts", test)

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add("attempts(%s:%s)" % (ending, result), test)
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add("tests(%s)" % result, test)
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated stats:",
                                   prefix="    ")
            result_stats.log_summary(logger.info, header="updated results:",
                                     prefix="    ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    level = args.verbose and logger.info or logger.debug
    test_stats.log_details(level, header="stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="result details:", prefix="  ")

    test_stats.log_summary(logger.info, header="stat summary:", prefix="  ")
    result_stats.log_summary(logger.info, header="result summary:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return 0
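
The help strings in these examples rely on argparse's %-interpolation,
which expands placeholders such as %(default)s and %(metavar)s from the
keyword arguments passed to add_argument(). A minimal sketch (the option
is illustrative):

import argparse

parser = argparse.ArgumentParser(description="demo")
parser.add_argument("--attempts", type=int, default=1, metavar="COUNT",
                    help="attempt a test up to %(metavar)s times"
                         " (default: %(default)s)")
# The rendered help line reads:
#   --attempts COUNT  attempt a test up to COUNT times (default: 1)
print(parser.format_help())
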
Example 4
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help=("number of times a test should be attempted before giving up"
                              " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
                              "; a negative %(metavar)s selects all tests"
                              "; a zero %(metavar)s selects not-started tests"
                              "; a positive %(metavar)s selects not-started, incomplete and failing tests"
                              "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directory)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)", test.name,
                                    result.value, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        mtime = time.localtime(os.stat(test.output_directory).st_mtime)
                        saved_output_directory = (test.output_directory
                                                + time.strftime(".%Y%m%d%H%M", start_time)
                                                + time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory, saved_output_directory)
                        os.rename(test.output_directory, saved_output_directory)
                        # if this second mkdir fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, run+1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # control-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
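
Several examples pick a log level with the pre-conditional-expression
idiom "x and A or B". A minimal sketch of it, with the standard logging
module standing in for logutil (the flag is illustrative):

import logging

verbose = 0  # stands in for args.verbose

# "verbose and logging.INFO or logging.DEBUG" yields INFO when verbose
# is truthy and DEBUG otherwise; it only works because log levels are
# non-zero integers.
level = verbose and logging.INFO or logging.DEBUG

# The equivalent, less fragile modern form:
level = logging.INFO if verbose else logging.DEBUG
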
Example 5
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # On first attempt (attempt == 0), empty the
                # <test>/OUTPUT/ directory of all contents.  On
                # subsequent attempts, move the files from the
                # previous attempt to <test>/OUTPUT/<attempt>/.
                #
                # XXX: Don't just delete the OUTPUT/ directory as
                # this, for a short period, changes the status of the
                # test to never-run.
                #
                # XXX: During boot, swan-transmogrify runs "chcon -R
                # testing/pluto".  Of course this means that each time
                # a test is added and/or a test is run (adding files
                # under <test>/OUTPUT), the boot process (and
                # consequently the time taken to run a test) keeps
                # increasing.
                #
                # Mitigate this slightly by emptying <test>/OUTPUT
                # before starting any test attempts.  It's assumed
                # that the previous test run was already captured
                # above with save-directory.

                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("emptying directory '%s'", test.output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if attempt == 0:
                                logger.debug("  remove '%s'", src)
                                if os.path.isfile(src):
                                    os.remove(src)
                                else:
                                    shutil.rmtree(src)
                            elif os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("  move '%s' to '%s'", src, dst)
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # control-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
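
Example 5 empties <test>/OUTPUT file-by-file rather than deleting the
directory, so the test never transiently looks never-run. A minimal
sketch of that emptying step (the directory name is illustrative):

import os
import shutil

output = "OUTPUT"  # hypothetical test output directory

# Remove the contents but keep the directory itself; removing the
# directory would briefly change the test's status to never-run.
for name in os.listdir(output):
    path = os.path.join(output, name)
    if os.path.isfile(path):
        os.remove(path)
    else:
        shutil.rmtree(path)
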
Example 6
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        "number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests"
    )
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument(
        "--output-directory",
        default=None,
        metavar="DIRECTORY",
        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT"
    )
    parser.add_argument(
        "directories",
        metavar="DIRECTORY",
        nargs="+",
        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory
                or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(
        logger,
        args.directories,
        args,
        testsuite_output_directory=args.output_directory,
        log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s",
                     args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count,
                                                         len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)",
                                    test_prefix, result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.
                        saved_output_directory = os.path.join(
                            test.output_directory, "%s.%d" % (time.strftime(
                                "%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'",
                                     saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory,
                                            exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, attempt + 1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'",
                                    saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = logutil.INFO if args.verbose else logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
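
A minimal sketch (not part of either example; it assumes only the
--retry semantics described in the comments above, and a result
object with a boolean .passed attribute) of how a retry value maps
onto a skip/run decision and an attempt count:

def plan(retry, result):
    """Return ("skip", 0) or ("run", attempts) for a saved result.

    retry follows the --retry convention above; result is the saved
    post-mortem result, or None when the test never started.
    """
    if retry >= 0 and result is not None:
        if result.passed or retry == 0:
            return ("skip", 0)
    # At least one attempt; a negative retry buys extra attempts.
    return ("run", max(abs(retry), 1))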
Example 7
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a python word.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so there is no need to worry about
    # initializing values to zero.
    stats = Stats()
    results = Results()
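    # time.localtime() returns a struct_time, which is what the
    # time.strftime() call in the attempt loop below expects.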
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.
                        saved_output_directory = os.path.join(test.output_directory,
                                                              "%s.%d" % (time.strftime("%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'", saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'", saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = logutil.INFO if args.verbose else logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
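
A minimal sketch of the entry-point boilerplate these examples assume
(not shown in the originals); main() returns a shell-style exit
status, so a wrapper like this fits:

if __name__ == "__main__":
    import sys
    sys.exit(main())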