Example 1
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info("  directories: %s", args.directories)
    logger.info("  verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - timing.START_TIME)

    return exit_code
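
All of these variants return an exit code rather than calling sys.exit() themselves. A minimal sketch of the entry-point boilerplate the module presumably relies on (an assumption; the surrounding module code is not shown in these excerpts, and sys is already imported for sys.stdout above):

if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())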
Example 2
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  directories: %s", args.directories)
    logger.info("  verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - timing.START_TIME)

    return exit_code
Example 3
def main():

    # On SIGUSR1, backtrace all threads; hopefully this is registered
    # early enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(description="Run tests",
                                     epilog="SIGUSR1 will dump all thread stacks")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info("  directories: %s", args.directories)
    logger.info("  verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    stop_time = datetime.now()
    logger.info("run finished at %s after %s", stop_time, stop_time - timing.START_TIME)

    return exit_code
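
This variant registers faulthandler against SIGUSR1 before doing anything else, so a hung run can be asked for backtraces from outside. A self-contained sketch of the mechanism (a standalone demo, not part of the script):

import faulthandler
import os
import signal

# After registration, receiving SIGUSR1 makes faulthandler print every
# thread's traceback to stderr without terminating the process.
faulthandler.register(signal.SIGUSR1)

# Self-signal to demonstrate the dump; in practice the signal would
# come from "kill -USR1 <pid>" aimed at a wedged test run.
os.kill(os.getpid(), signal.SIGUSR1)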
Example 4
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.
                        saved_output_directory = os.path.join(test.output_directory,
                                                              "%s.%d" % (time.strftime("%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'", saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'", saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
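
The --retry rules spelled out in the help string and comments reduce to a small predicate. A minimal sketch, assuming a prior-result object with a passed attribute as in the code above (the function name is hypothetical):

def should_run(retry, old_result):
    # Negative COUNT: always re-run, whatever the prior result.
    if retry < 0:
        return True
    # Not started (no OUTPUT directory, so no result): always run.
    if old_result is None:
        return True
    # Passed: skip.
    if old_result.passed:
        return False
    # Incomplete or failed: run only when a positive COUNT asks for it.
    return retry > 0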
Example 5
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        ("number of times a test should be attempted before giving up"
         " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
         "; a negative %(metavar)s selects all tests"
         "; a zero %(metavar)s selects not-started tests"
         "; a positive %(metavar)s selects not-started, incomplete and failing tests"
         "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories",
                        metavar="DIRECTORY",
                        nargs="*",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger,
                                              args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)",
                                    test.name, result.value,
                                    test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        mtime = time.localtime(
                            os.stat(test.output_directory).st_mtime)
                        saved_output_directory = (
                            test.output_directory +
                            time.strftime(".%Y%m%d%H%M", start_time) +
                            time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory,
                                     saved_output_directory)
                        os.rename(test.output_directory,
                                  saved_output_directory)
                        # if the second attempt fails, let it crash
                        os.mkdir(test.output_directory)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, run + 1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = args.verbose and logutil.INFO or logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
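
This variant renames the whole OUTPUT directory away, and the new name encodes two timestamps: when this run started (so one wildcard matches everything a run saved) and the old directory's mtime (a fair approximation of when the previous run finished). The naming scheme in isolation (the helper name is illustrative):

import os
import time

def saved_name(output_directory, start_time):
    # OUTPUT -> OUTPUT.<run-start>.<old-mtime>
    mtime = time.localtime(os.stat(output_directory).st_mtime)
    return (output_directory
            + time.strftime(".%Y%m%d%H%M", start_time)
            + time.strftime(".%H%M%S", mtime))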
Example 6
def main():

    # On SIGUSR1, backtrace all threads; hopefully this is registered
    # early enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(
        description="Run tests", epilog="SIGUSR1 will dump all thread stacks")

    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--pid-file",
                        default="",
                        help="file to store process id of KVMRUNNER")

    parser.add_argument(
        "directories",
        metavar="DIRECTORY",
        nargs="+",
        help=
        "a testsuite directory, a TESTLIST file, or a list of test directories"
    )
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info("  directories: %s", args.directories)
    logger.info("  verbose: %s", args.verbose)
    logger.info("  pid-file: %s", args.pid_file)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    if args.pid_file:
        pid = os.getpid()
        logger.info("writing pid %d to '%s'", pid, args.pid_file)
        with open(args.pid_file, "wt") as pidfile:
            pidfile.write("%d\n" % pid)

    tests = testsuite.load_testsuite_or_tests(logger,
                                              args.directories,
                                              args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s",
                     args.directories)
        return 1

    if len(tests) == 1 and args.run_post_mortem is None:
        logger.warning(
            "skipping post-mortem.sh as only one test; use --run-post-mortem true to override this"
        )
        args.run_post_mortem = False

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:",
                           prefix="  ")
    result_stats.log_details(logger.info,
                             header="final test details:",
                             prefix="  ")

    test_stats.log_summary(logger.info,
                           header="final test stats:",
                           prefix="  ")
    result_stats.log_summary(logger.info,
                             header="final test results:",
                             prefix="  ")

    stop_time = datetime.now()
    logger.info("run finished at %s after %s", stop_time,
                stop_time - timing.START_TIME)

    return exit_code
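
Taken together, --pid-file and the SIGUSR1 epilog suggest the intended debugging workflow: look up the runner's pid and signal it. A hypothetical companion snippet (not part of these scripts):

import os
import signal

def dump_thread_stacks(pid_file):
    # Read the pid kvmrunner wrote at startup and send SIGUSR1; the
    # faulthandler registration above makes the target process print
    # all of its thread stacks to its stderr.
    with open(pid_file) as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGUSR1)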
Example 7
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    # parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-output-directory", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+",
                        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.print_output_directory = args.print_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Is the last argument some sort of baseline?  If it is, pre-load
    # it.
    #
    # XXX: Should also support something like --baseline-testsuite and
    # --baseline-output parameters.
    baseline = None
    if len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args.directories[-1], args,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(test, args, baseline=baseline,
                                     output_directory=test.saved_output_directory,
                                     skip_sanitize=args.quick or args.quick_sanitize,
                                     skip_diff=args.quick or args.quick_diff,
                                     update=args.update,
                                     update_sanitize=args.update_sanitize,
                                     update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            # Print the test's name/path
            if not args.print_directory and not args.print_name and not args.print_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print((test.saved_output_directory
                       and test.saved_output_directory
                       or test.directory), end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.print_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.print_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.print_output_directory:
                    print(sep, end="")
                    print((test.saved_output_directory
                           and test.saved_output_directory
                           or test.output_directory), end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            print(sep, end="")
            if result.errors:
                print(result, result.errors, end="")
            else:
                print(result, end="")
            sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
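
The -v handling in this variant is a cascade: each additional -v unlocks the next tier of output flags, while the explicit --print-*/--list-* options still work individually. A minimal sketch of the same pattern (the tier assignments are illustrative; the variants above differ in the details):

import argparse

def apply_verbosity(args):
    # Each extra -v switches on the next tier of flags; flags already
    # set explicitly on the command line are left untouched.
    tiers = [
        ("print_directory", "print_name"),   # -v
        ("print_output_directory",),         # -vv
        ("list_untested",),                  # -vvv
        ("list_ignored",),                   # -vvvv
    ]
    for v, names in enumerate(tiers):
        for name in names:
            if args.verbose > v:
                setattr(args, name, True)

ns = argparse.Namespace(verbose=2, print_directory=False, print_name=False,
                        print_output_directory=False, list_untested=False,
                        list_ignored=False)
apply_verbosity(ns)
assert ns.print_name and ns.print_output_directory and not ns.list_untested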
Example 8
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY",
                        default=os.path.join("BACKUP", time.strftime("%Y%m%d%H%M%S", time.localtime())),
                        help="backup existing <test>/OUTPUT to %(metavar)s/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignore(test, ignore)
                test_stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        test_stats.add("skipped", test)
                        result_stats.add_skip(old_result)
                        continue
                    test_stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            test_stats.add("tests", test)

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory, test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add("attempts", test)

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add("attempts(%s:%s)" % (ending, result), test)
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add("tests(%s)" % result, test)
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated stats:",
                                   prefix="    ")
            result_stats.log_summary(logger.info, header="updated results:",
                                     prefix="    ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    level = args.verbose and logger.info or logger.debug
    test_stats.log_details(level, header="stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="result details:", prefix="  ")

    test_stats.log_summary(logger.info, header="stat summary:", prefix="  ")
    result_stats.log_summary(logger.info, header="result summary:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return 0
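
Both the BACKUP step and the per-attempt save move the OUTPUT contents file by file, so the directory itself is never renamed away; per the comments above, this keeps the tree stable for the boot-time "chcon -R testing/pluto" and leaves a trail of which tests each run touched. The move pattern in isolation (the helper name is illustrative):

import os

def move_contents(src_dir, dst_dir, dry_run=False):
    # Move regular files across one at a time; src_dir itself always
    # exists, as it is never renamed or removed.
    if not dry_run:
        os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src) and not dry_run:
            os.replace(src, os.path.join(dst_dir, name))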
Example 9
def main():

    parser = argparse.ArgumentParser(
        description=
        "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))
    parser.add_argument(
        "--quick-sanitize",
        action="store_true",
        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument(
        "--quick-diff",
        action="store_true",
        help=("Use the previously generated '.console.diff' file"))

    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize",
                        action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff",
                        action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--dump-args", action="store_true")

    parser.add_argument("--prefix",
                        action="store",
                        type=Prefix,
                        choices=[p for p in Prefix],
                        help="prefix to display with each test")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print",
                        action="append",
                        default=[],
                        choices=[p for p in Print],
                        type=Print,
                        help="what information to display about each test")

    parser.add_argument(
        "--stats",
        action="store",
        default=Stats.summary,
        type=Stats,
        choices=[c for c in Stats],
        help="provide overview statistics; default: \"%(default)s\"")

    parser.add_argument(
        "--baseline",
        metavar="DIRECTORY",
        help="a %(metavar)s containing baseline testsuite output")

    parser.add_argument(
        "directories",
        metavar="DIRECTORY",
        nargs="+",
        help=
        "%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output"
    )
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument(
        "baseline",
        metavar="BASELINE-DIRECTORY",
        nargs="?",
        help=
        "an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run"
    )

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # default to printing results
    if not args.print:
        args.print = [Print.result]

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Prefix: %s", args.prefix)
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite can be loaded more forgivingly.
        baseline = testsuite.load(logger,
                                  args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output, magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(
                logger,
                args,
                testsuite_directory=baseline_directory,
                saved_testsuite_output_directory=args.baseline,
                error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.directories[-1],
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log,
                                     header="Details:",
                                     prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log,
                                     header="Summary:",
                                     prefix="  ")

    return 0
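
This variant replaces the boolean --print-* flags with enum-valued options, passing the Enum class as both type= and choices=. A self-contained sketch of that argparse pattern (the Print members here are invented; the real Print, Stats and Prefix enums are not shown in the excerpt):

import argparse
import enum

class Print(enum.Enum):
    result = "result"
    diff = "diff"
    directory = "directory"

    def __str__(self):
        # argparse calls str() on the choices when building help text
        return self.value

parser = argparse.ArgumentParser()
parser.add_argument("--print", action="append", default=[],
                    type=Print, choices=list(Print),
                    help="what information to display about each test")

args = parser.parse_args(["--print", "result", "--print", "diff"])
assert args.print == [Print.result, Print.diff]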
Example 10
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--prefix-directory", action="store_true")
    parser.add_argument("--prefix-name", action="store_true")
    parser.add_argument("--prefix-output-directory", action="store_true")

    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-scripts", action="store_true")
    parser.add_argument("--print-domains", action="store_true")
    parser.add_argument("--print-initiators", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("--baseline", metavar="DIRECTORY",
                        help="a %(metavar)s containing baseline testsuite output")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), test output, or testsuite output")
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # default to printing results
    if not args.print_scripts \
       and not args.print_result \
       and not args.print_diff \
       and not args.print_initiators \
       and not args.print_domains:
        args.print_result = True

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.prefix_directory = args.prefix_directory or args.verbose > v
    args.prefix_name = args.prefix_name or args.verbose > v
    args.print_result = args.print_result or args.verbose > v
    v += 1
    args.prefix_output_directory = args.prefix_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    args.print_scripts = args.print_scripts or args.verbose > v
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite; loading can be more forgiving.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.baseline,
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Assume that it is baseline output only.
            if args.testing_directory:
                baseline_directory = os.path.join(args.testing_directory, "pluto")
            else:
                baseline_directory = utils.directory("..", "pluto")
            baseline = testsuite.load(logger, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.directories[-1],
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        result_stats.log_summary(stderr_log, prefix="  ")

    return 0
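Example 10's verbosity ladder is worth isolating: each extra -v past a running threshold enables one more group of output flags, and re-ordering the groups only means moving the "v += 1" lines. A self-contained illustration (the flag names here are illustrative, not the tool's full set):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--print-name", action="store_true")
parser.add_argument("--list-untested", action="store_true")
args = parser.parse_args(["-vv"])

v = 0
args.print_name = args.print_name or args.verbose > v        # enabled by -v
v += 1
args.list_untested = args.list_untested or args.verbose > v  # enabled by -vv
assert args.print_name and args.list_untested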
Example 11
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help="number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests")
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("--output-directory", default=None, metavar="DIRECTORY",
                        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              testsuite_output_directory=args.output_directory,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so there is no need to worry about
    # initializing values to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little wierd.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # On first attempt (attempt == 0), empty the
                # <test>/OUTPUT/ directory of all contents.  On
                # subsequent attempts, move the files from the
                # previous attempt to <test>/OUTPUT/<attempt>/.
                #
                # XXX: Don't just delete the OUTPUT/ directory as
                # this, for a short period, changes the status of the
                # test to never-run.
                #
                # XXX: During boot, swan-transmogrify runs "chcon -R
                # testing/pluto".  Of course this means that each time
                # a test is added and/or a test is run (adding files
                # under <test>/OUTPUT), the boot process (and
                # consequently the time taken to run a test) keeps
                # increasing.
                #
                # Mitigate this slightly by emptying <test>/OUTPUT
                # before starting any test attempts.  It's assumed
                # that the previous test run was already captured
                # above with save-directory.

                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("emptying directory '%s'", test.output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if attempt == 0:
                                logger.debug("  remove '%s'", src)
                                if os.path.isfile(src):
                                    os.remove(src)
                                else:
                                    shutil.rmtree(src)
                            elif os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("  move '%s' to '%s'", src, dst)
                                os.makedirs(saved_output_directory, exist_ok=True)
                                os.rename(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, attempts, datetime.now())

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = logutil.INFO if args.verbose else logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
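Example 11 rotates <test>/OUTPUT rather than deleting it, so the test never briefly looks never-run. A hedged stand-alone sketch of that rotation, assuming the layout described in the comments above (previous attempts land in OUTPUT/<attempt>/):

import os
import shutil

def rotate_output(output_directory, attempt):
    try:
        os.mkdir(output_directory)
        return  # fresh directory, nothing to rotate
    except FileExistsError:
        pass
    saved = os.path.join(output_directory, str(attempt))
    for name in os.listdir(output_directory):
        src = os.path.join(output_directory, name)
        if attempt == 0:
            # Empty the directory but keep the directory itself.
            if os.path.isfile(src):
                os.remove(src)
            else:
                shutil.rmtree(src)
        elif os.path.isfile(src):
            # Preserve the previous attempt's plain files.
            os.makedirs(saved, exist_ok=True)
            os.rename(src, os.path.join(saved, name))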
Example 12
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a Python keyword.
    parser.add_argument(
        "--retry",
        type=int,
        metavar="COUNT",
        help=
        "number of times a test should be attempted before giving up (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed); a negative %(metavar)s selects all tests; a zero %(metavar)s selects not-started tests; a positive %(metavar)s selects not-started, incomplete and failing tests; default is to select not-started tests"
    )
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument(
        "--output-directory",
        default=None,
        metavar="DIRECTORY",
        help="save test results as %(metavar)s/<test> instead of <test>/OUTPUT"
    )
    parser.add_argument(
        "directories",
        metavar="DIRECTORY",
        nargs="+",
        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  output-directory: %s", args.output_directory
                or "<testsuite>/<test>/OUTPUT (default)")
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(
        logger,
        args.directories,
        args,
        testsuite_output_directory=args.output_directory,
        log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s",
                     args.directories)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info(
            "Explicit directory list; forcing --retry=%d (retry failed tests)",
            args.retry)

    # Use a default dict so there is no need to worry about
    # initializing values to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        test_count = 0
        for test in tests:
            stats.add("total", test)
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count,
                                                         len(tests))

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, ignore)
                continue

            # Implement "--retry" as described above: if retry is
            # negative, the test is always run; if there's no result,
            # the test is always run; skip passed tests; else things
            # get a little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("%s: passed", test_prefix)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)",
                                    test_prefix, result, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    stats.add("retry", test)

            logger.info("%s: starting ...", test_prefix)
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            attempts = max(abs(retry), 1)
            for attempt in range(attempts):
                stats.add("attempts", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory copy its contents to:
                #
                #     OUTPUT/YYYYMMDDHHMMSS.ATTEMPT
                #
                # so, when re-running, earlier attempts are saved.  Do
                # this before the OUTPUT/debug.log is started so that
                # each test attempt has its own log, and otherwise, it
                # too would be moved away.
                saved_output_directory = None
                saved_output = []
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wildcard.
                        saved_output_directory = os.path.join(
                            test.output_directory, "%s.%d" % (time.strftime(
                                "%Y%m%d%H%M%S", start_time), attempt))
                        logger.debug("moving existing OUTPUT to '%s'",
                                     saved_output_directory)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            dst = os.path.join(saved_output_directory, name)
                            if os.path.isfile(src):
                                os.makedirs(saved_output_directory,
                                            exist_ok=True)
                                os.rename(src, dst)
                                saved_output.append(name)
                                logger.debug("  moved '%s' to '%s'", src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(
                        logger, os.path.join(test.output_directory,
                                             "debug.log")):
                    logger.info(
                        "****** test %s attempt %d of %d started at %s ******",
                        test.name, attempt + 1, attempts, datetime.now())

                    # Add a log message about any saved output
                    # directory to the per-test-attempt debug log.  It
                    # just looks better.
                    if saved_output:
                        logger.info("saved existing '%s' in '%s'",
                                    saved_output, saved_output_directory)

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'",
                                        test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****",
                                         test.name)
                        result = post.mortem(test,
                                             args,
                                             update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("attempts(%s:%s)" % (ending, result), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT (don't reach here during
            # ctrl-c or crash).
            results.add(result)
            stats.add("tests(%s)" % result, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = logutil.INFO if args.verbose else logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
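The --retry help text in Examples 11 and 12 encodes a small decision table; read literally, it reduces to the sketch below, where result is a hypothetical prior-result object with a passed attribute and None means the test never started:

def should_run(retry, result):
    if retry < 0:
        return True        # negative COUNT: select every test
    if result is None:
        return True        # not started: always run
    if result.passed:
        return False       # passed tests are always skipped
    return retry > 0       # incomplete/failed need a positive COUNT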
Example 13
def main():

    parser = argparse.ArgumentParser(
        description=
        "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))
    parser.add_argument(
        "--quick-sanitize",
        action="store_true",
        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument(
        "--quick-diff",
        action="store_true",
        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize",
                        action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff",
                        action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    # parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-output-directory", action="store_true")

    parser.add_argument("--list-ignored",
                        action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested",
                        action="store_true",
                        help="include untested tests in the list")

    parser.add_argument(
        "directories",
        metavar="TEST-DIRECTORY",
        nargs="+",
        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline",
                        metavar="BASELINE-DIRECTORY",
                        nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.print_output_directory = args.print_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Is the last argument some sort of baseline?  If it is, pre-load
    # it.
    #
    # XXX: Should also support something like --baseline-testsuite and
    # --baseline-output parameters.
    baseline = None
    if len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger,
                                  args.directories[-1],
                                  args,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.saved_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            # Print the test's name/path
            if not args.print_directory and not args.print_name and not args.print_output_directory:
                # By default: when the path given on the command line
                # explicitly specifies a test's output directory
                # (found in TEST.SAVED_OUTPUT_DIRECTORY), print that;
                # otherwise print the path to the test's directory.
                print(sep, end="")
                print(test.saved_output_directory or test.directory,
                      end="")
                sep = " "
            else:
                # Print the test name/path per command line
                if args.print_name:
                    print(sep, end="")
                    print(test.name, end="")
                    sep = " "
                if args.print_directory:
                    print(sep, end="")
                    print(test.directory, end="")
                    sep = " "
                if args.print_output_directory:
                    print(sep, end="")
                    print(test.saved_output_directory
                          or test.output_directory,
                          end="")
                    sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            print(sep, end="")
            if result.errors:
                print(result, result.errors, end="")
            else:
                print(result, end="")
            sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
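The column printing in Example 13 (and the listings that follow) relies on a lazy separator: nothing before the first field, one space before each later field, so optional columns never leave double spaces. The same pattern as a small helper, purely illustrative:

def print_fields(*fields):
    sep = ""
    for field in fields:
        if field is None:
            continue  # skip columns that were not selected
        print(sep, end="")
        print(field, end="")
        sep = " "
    print()

print_fields("basic-pluto-01", None, "passed")  # -> basic-pluto-01 passed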
Example 14
def main():

    parser = argparse.ArgumentParser(
        description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).",
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick", action="store_true", help=("Use the previously generated '.console.txt' and '.console.diff' files")
    )
    parser.add_argument(
        "--quick-sanitize", action="store_true", help=("Use the previously generated '.console.txt' file")
    )
    parser.add_argument("--quick-diff", action="store_true", help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true", help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true", help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true", help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")

    parser.add_argument("--list-ignored", action="store_true", help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true", help="include untested tests in the list")

    parser.add_argument(
        "directories", metavar="TEST-DIRECTORY", nargs="+", help=("Either a testsuite (only one) or test directory")
    )
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument(
        "baseline",
        metavar="BASELINE-DIRECTORY",
        nargs="?",
        help=("An optional testsuite directory containing" " results from a previous test run"),
    )
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?  Suppress any
        # nasty errors.
        basetests = testsuite.load(logger, args.directories[-1], error_level=logutil.DEBUG)
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST.
    # Also, passing the full baseline to Test.results() lets that
    # function differentiate between a baseline missing results and
    # one that is entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.old_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff,
                )
                if not result and not args.list_untested:
                    continue

            sep = ""

            if args.print_name:
                print(sep, end="")
                print(test.name, end="")
                sep = " "

            if args.print_directory:
                print(sep, end="")
                print(test.directory, end="")
                sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
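Example 14 preloads the baseline into a dict keyed by test name, so each per-test lookup is O(1) and TESTLIST is scanned only once; the loop is equivalent to this short helper (basetests being any iterable of objects with a .name attribute):

def index_baseline(basetests):
    if not basetests:
        return None  # no baseline at all, distinct from missing results
    return {test.name: test for test in basetests}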
Example 15
def main():

    parser = argparse.ArgumentParser(
        description=
        "list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately)."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))
    parser.add_argument(
        "--quick-sanitize",
        action="store_true",
        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument(
        "--quick-diff",
        action="store_true",
        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize",
                        action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff",
                        action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")

    parser.add_argument("--list-ignored",
                        action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested",
                        action="store_true",
                        help="include untested tests in the list")

    parser.add_argument(
        "directories",
        metavar="TEST-DIRECTORY",
        nargs="+",
        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline",
                        metavar="BASELINE-DIRECTORY",
                        nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    v += 1
    args.print_args = args.print_args or args.verbose > v

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?  Suppress any
        # nasty errors.
        basetests = testsuite.load(logger,
                                   args.directories[-1],
                                   error_level=logutil.DEBUG)
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST.
    # Also, passing the full baseline to Test.results() lets that
    # function differentiate between a baseline missing results and
    # one that is entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:

        # Produce separate runtimes for each test.
        with logutil.TIMER:

            logger.debug("start processing test %s", test.name)

            # Filter out tests that are being ignored?
            ignore = testsuite.ignore(test, args)
            if ignore and not args.list_ignored:
                continue

            # Filter out tests that have not been run?
            result = None
            if not ignore:
                result = post.mortem(
                    test,
                    args,
                    baseline=baseline,
                    output_directory=test.old_output_directory,
                    skip_sanitize=args.quick or args.quick_sanitize,
                    skip_diff=args.quick or args.quick_diff,
                    update=args.update,
                    update_sanitize=args.update_sanitize,
                    update_diff=args.update_diff)
                if not result and not args.list_untested:
                    continue

            sep = ""

            if args.print_name:
                print(sep, end="")
                print(test.name, end="")
                sep = " "

            if args.print_directory:
                print(sep, end="")
                print(test.directory, end="")
                sep = " "

            if ignore:
                print(sep, end="")
                print("ignored", ignore, end="")
                sep = " "

            if result:
                print(sep, end="")
                print(result, end="")
                sep = " "

            print()

            if args.print_diff and result:
                for domain in result.diffs:
                    for line in result.diffs[domain]:
                        if line:
                            print(line)

            sys.stdout.flush()

            logger.debug("stop processing test %s", test.name)

    return 0
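Several examples wrap per-test work in "with logutil.TIMER:". Its implementation is not shown anywhere in these listings; a minimal stand-in that logs elapsed wall-clock time for the enclosed block, offered only as an assumption about what it does:

import logging
import time
from contextlib import contextmanager

@contextmanager
def timer(logger, message="runtime"):
    # Log how long the enclosed block took, even if it raises.
    start = time.monotonic()
    try:
        yield
    finally:
        logger.debug("%s: %.3f seconds", message, time.monotonic() - start)

logging.basicConfig(level=logging.DEBUG)
with timer(logging.getLogger("demo"), "sleep"):
    time.sleep(0.1)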
Example 16
def main():

    parser = argparse.ArgumentParser(description="list test results",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))

    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print", action="store",
                        default=Print(Print.path, Print.result, Print.issues),
                        type=Print, metavar=str(Print),
                        help="comman separate list of attributes to print for each test; default: '%(default)s'")

    parser.add_argument("--stats", action="store", default=Stats.summary, type=Stats,
                        choices=[c for c in Stats],
                        help="provide overview statistics; default: \"%(default)s\"");

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results are displayed"
    parser.add_argument("--baseline", "-b",
                        metavar=baseline_metavar, help=baseline_help)

    parser.add_argument("--json", action="store_true",
                        help="output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well formed json list")

    parser.add_argument("directories", metavar="DIRECTORY-OR-FILE", nargs="+",
                        help="a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'")

    # Note: this argument serves as documentation only.  The RESULT
    # argument consumes all remaining parameters.
    parser.add_argument("baseline_ignored", nargs="?",
                        metavar=baseline_metavar, help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Baseline: %s", args.baseline)
        logger.info("  Json: %s", args.json)
        logger.info("  Quick: %s", args.quick)
        logger.info("  Update: %s", args.update)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite; loading can be more
        # forgiving.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output; magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger, logutil.DEBUG, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log, header="Details:", prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix="  ")

    return 0
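Example 16's --print option hands argparse a custom Print type that parses a comma-separated attribute list. The Print class itself is not shown; a hypothetical minimal equivalent built on a plain set and argparse's type hook:

import argparse

def print_list(value):
    known = {"path", "result", "issues", "diff"}  # illustrative attribute names
    attrs = set(value.split(","))
    unknown = attrs - known
    if unknown:
        raise argparse.ArgumentTypeError("unknown attribute: %s" % ",".join(sorted(unknown)))
    return attrs

parser = argparse.ArgumentParser()
parser.add_argument("--print", type=print_list, default={"path", "result"})
args = parser.parse_args(["--print", "path,diff"])
assert args.print == {"path", "diff"}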
Example 17
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(
        description="list test results",
        epilog=
        "By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  SIGUSR1 will dump all thread stacks"
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--exit-ok",
        action="store_true",
        help=
        ("return a zero exit status; normally, when there are failures, a non-zero exit status is returned"
         ))

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))

    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument(
        "--print",
        action="store",
        default=printer.Print(printer.Print.PATH, printer.Print.RESULT,
                              printer.Print.ISSUES),
        type=printer.Print,
        metavar=str(printer.Print),
        help=
        "comman separate list of attributes to print for each test; default: '%(default)s'"
    )

    parser.add_argument(
        "--stats",
        action="store",
        default=Stats.summary,
        type=Stats,
        choices=[c for c in Stats],
        help="provide overview statistics; default: \"%(default)s\"")

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results are displayed"
    parser.add_argument("--baseline",
                        "-b",
                        metavar=baseline_metavar,
                        help=baseline_help)

    parser.add_argument(
        "--json",
        action="store_true",
        help=
        "output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well formed json list"
    )

    parser.add_argument(
        "directories",
        metavar="DIRECTORY-OR-FILE",
        nargs="+",
        help=
        "a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'"
    )

    # Note: this argument serves as documentation only.  The RESULT
    # argument consumes all remaining parameters.
    parser.add_argument("baseline_ignored",
                        nargs="?",
                        metavar=baseline_metavar,
                        help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    # XXX: while checking for an UNTESTED test should be very cheap
    # (does OUTPUT/ exist?), it isn't.  Currently it triggers a full
    # post-mortem analysis.
    skip.add_arguments(parser, skip.Skip.UNTESTED)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Baseline: %s", args.baseline)
        logger.info("  Json: %s", args.json)
        logger.info("  Quick: %s", args.quick)
        logger.info("  Update: %s", args.update)
        logger.info("  Exit OK: %s", args.exit_ok)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        publish.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite; loading can be more
        # forgiving.
        baseline = testsuite.load(logger,
                                  logutil.DEBUG,
                                  args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline and os.path.isdir(args.baseline):
            # Perhaps the baseline directory (for instance
            # BACKUP/YYYY-MM-DD-...) only contains a copy of the
            # output.  Magic up a baseline by combining the output
            # with the tests in ARGS.TESTING_DIRECTORY.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger,
                                      logutil.DEBUG,
                                      args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger,
                                  logutil.DEBUG,
                                  args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    exit_code = 125  # assume a 'git bisect' barf
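    # (125 follows the "git bisect run" convention: exit status 125
    # tells bisect the revision cannot be tested and should be skipped,
    # rather than marked good or bad.)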
    try:
        exit_code = results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log,
                                     header="Details:",
                                     prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log,
                                     header="Summary:",
                                     prefix="  ")
        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    return exit_code
Esempio n. 18
0
def main():

    parser = argparse.ArgumentParser(description="Connect to and run a shell command on a virtual machine domain",
                                     epilog=("If no command or file is specified an interactive shell is created."))

    parser.add_argument("--timeout", type=argutil.timeout, default=None,
                        help=("maximum runtime for the command"
                              "; -1 for no timeout"
                              " (default: no timeout)"))
    argutil.add_redirect_argument(parser, "re-direct console output from stdout to %(metavar)s",
                                  "--output", "-o",
                                  default=sys.stdout, metavar="FILE")

    parser.add_argument("--chdir", default=None, action="store", metavar="PATH",
                        help=("first change directory to %(metavar)s on the remote"
                              " domain and update prompt-match logic to expect"
                              " that directory"
                              "; an absolute %(metavar)s is used unmodified"
                              "; a relative  %(metavar)s, which is interpreted"
                              " as relative to the current local working directory"
                              ", is converted to an absolute remote path before use"
                              " (default: leave directory unchanged)"))
    parser.add_argument("--boot", default=None, action="store",
                        choices=set(["cold", "warm"]),
                        help=("force the domain to boot"
                              "; 'cold': power-off any existing domain"
                              "; 'warm': reboot any existing domain"
                              " (default: leave existing domain running)"))
    parser.add_argument("--shutdown", default=False, action="store_true",
                        help=("on-completion shut down the domain"
                              " (default: leave the domain running)"))
    parser.add_argument("--mode", default=None,
                        choices=set(["interactive", "batch"]),
                        help=("enter mode"
                              " (default: if there is no command enter interactive mode)"))

    parser.add_argument("domain", action="store",
                        help="domain (virtual machine) to connect to")
    parser.add_argument("command", nargs="?",
                        help="run shell command non-interactively")

    logutil.add_arguments(parser)
    args = parser.parse_args()
    logutil.config(args)

    # Get things started
    domain = virsh.Domain(args.domain)

    status = 0
    console = None

    # Get the current console; this will be None if the machine is
    # shutoff.
    console = domain.console()
    if args.boot:
        if args.boot == "warm":
            if console:
                remote.reboot(domain, console)
            else:
                console = remote.start(domain)
        elif args.boot == "cold":
            if console:
                remote.shutdown(domain, console)
            console = remote.start(domain)

    # Find a reason to log-in and interact with the console.
    batch = args.mode == "batch" or args.command is not None
    interactive = args.mode == "interactive" or (args.command is None and args.boot is None and not args.shutdown)

    if interactive or batch:

        # If the machine hasn't been booted, do so now.
        if not console:
            console = remote.start(domain)
        remote.login(domain, console)

        if args.chdir and os.path.isabs(args.chdir):
            chdir = args.chdir
        elif args.chdir:
            chdir = remote.directory(domain, console, directory=os.path.abspath(args.chdir))
        else:
            chdir = None
        if chdir:
            domain.logger.info("'cd' to %s", chdir)
            console.chdir(chdir)

        if args.command:

            console.output(args.output)
            console.run("")

            status = console.run(args.command, timeout=args.timeout)
            print()

        if interactive:

            print()
            output = console.output(None)
            if output:
                print("info: disabled --output as it makes pexpect crash when in interactive mode.")
            if args.debug:
                print("info: pexpect ignores --debug in interactive mode!")
            print("Escape character is ^]")
            # Hack so that the prompt appears
            console.output(sys.stdout)
            console.run("")
            console.output()
            # Normal mode
            console.stty_sane()
            console.interact()

    if args.shutdown:
        shutdown_status = remote.shutdown(domain)
        status = status or shutdown_status

    sys.exit(status)
Esempio n. 19
0
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))

    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--dump-args", action="store_true")

    parser.add_argument("--prefix", action="store", type=Prefix,
                        choices=[p for p in Prefix],
                        help="prefix to display with each test")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print", action="append", default=[],
                        choices=[p for p in Print], type=Print,
                        help="what information to display about each test")

    parser.add_argument("--stats", action="store", default=Stats.summary, type=Stats,
                        choices=[c for c in Stats],
                        help="provide overview statistics; default: \"%(default)s\"");

    parser.add_argument("--baseline", metavar="DIRECTORY",
                        help="a %(metavar)s containing baseline testsuite output")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output")
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # default to printing results
    if not args.print:
        args.print = [Print.result]

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Prefix: %s", args.prefix)
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite can be more forgiving in how
        # it is loaded.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output, magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger, args,
                                      testsuite_directory=baseline_directory,
                                      saved_testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log, header="Details:", prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix="  ")

    return 0
Esempio n. 20
0
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  If a BASELINE directory is specified, anywhere a test result is different to the baseline is also identified.")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))
    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("--prefix-directory", action="store_true")
    parser.add_argument("--prefix-name", action="store_true")
    parser.add_argument("--prefix-output-directory", action="store_true")

    parser.add_argument("--print-result", action="store_true")
    parser.add_argument("--print-diff", action="store_true")
    parser.add_argument("--print-args", action="store_true")
    parser.add_argument("--print-scripts", action="store_true")
    parser.add_argument("--print-domains", action="store_true")
    parser.add_argument("--print-initiators", action="store_true")

    parser.add_argument("--stats", action="store", default="summary",
                        choices=["details", "summary", "none"],
                        help="provide overview statistics; default: \"%(default)s\"");

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("--baseline", metavar="DIRECTORY",
                        help="a %(metavar)s containing baseline testsuite output")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), test output, or testsuite output")
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining arguments.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help="an optional testsuite directory (contains a TESTLIST file) containing output from a previous test run")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # default to printing results
    if not args.print_scripts \
       and not args.print_result \
       and not args.print_diff \
       and not args.print_initiators \
       and not args.print_domains:
        args.print_result = True

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.prefix_directory = args.prefix_directory or args.verbose > v
    args.prefix_name = args.prefix_name or args.verbose > v
    args.print_result = args.print_result or args.verbose > v
    v += 1
    args.prefix_output_directory = args.prefix_output_directory or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1
    args.print_scripts = args.print_scripts or args.verbose > v
    v += 1
    args.print_args = args.print_args or args.verbose > v
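    # So, cumulatively: -v enables the prefixes and the result, -vv
    # adds the output directory, -vvv lists untested and ignored
    # tests, -vvvv prints scripts, and -vvvvv prints the arguments.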

    if args.print_args:
        post.log_arguments(logger, args)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        return 1

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explict baseline testsuite, can be more forgiving.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.baseline,
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Assume that it is baseline output only.
            if args.testing_directory:
                baseline_directory = os.path.join(args.testing_directory, "pluto")
            else:
                baseline_directory = utils.directory("..", "pluto")
            baseline = testsuite.load(logger, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, args,
                                  testsuite_directory=args.directories[-1],
                                  testsuite_output_directory=None,
                                  error_level=logutil.DEBUG)
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats == "details":
            result_stats.log_details(stderr_log, header="Details:", prefix="  ")
        if args.stats in ["details", "summary"]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix="  ")

    return 0
Esempio n. 21
0
def main():

    parser = argparse.ArgumentParser(
        description=
        "Connect to and run a shell command on a virtual machine domain",
        epilog=
        ("If no command or file is specified an interactive shell is created."
         ))

    parser.add_argument("--timeout",
                        type=argutil.timeout,
                        default=None,
                        help=("maximum runtime for the command"
                              "; -1 for no timeout"
                              " (default: no timeout)"))
    argutil.add_redirect_argument(
        parser,
        "re-direct console output from stdout to %(metavar)s",
        "--output",
        "-o",
        default=sys.stdout,
        metavar="FILE")

    parser.add_argument(
        "--chdir",
        default=None,
        action="store",
        metavar="PATH",
        help=("first change directory to %(metavar)s on the remote"
              " domain and update prompt-match logic to expect"
              " that directory"
              "; an absolute %(metavar)s is used unmodified"
              "; a relative  %(metavar)s, which is interpreted"
              " as relative to the current local working directory"
              ", is converted to an absolute remote path before use"
              " (default: leave directory unchanged)"))
    parser.add_argument("--boot",
                        default=None,
                        action="store",
                        choices=set(["cold", "warm"]),
                        help=("force the domain to boot"
                              "; 'cold': power-off any existing domain"
                              "; 'warm': reboot any existing domain"
                              " (default: leave existing domain running)"))
    parser.add_argument("--shutdown",
                        default=False,
                        action="store_true",
                        help=("on-completion shut down the domain"
                              " (default: leave the domain running)"))
    parser.add_argument(
        "--mode",
        default=None,
        choices=set(["interactive", "batch"]),
        help=("enter mode"
              " (default: if there is no command enter interactive mode)"))
    parser.add_argument("--hostname", default=None, help="Domain's host name")

    parser.add_argument("domain",
                        action="store",
                        help="domain (virtual machine) to connect to")
    parser.add_argument("command",
                        nargs="?",
                        help="run shell command non-interactively")

    logutil.add_arguments(parser)
    args = parser.parse_args()
    logutil.config(args)

    # Get things started
    domain = virsh.Domain(args.domain, hostname=args.hostname)

    status = 0
    console = None

    # Get the current console; this will be None if the machine is
    # shutoff.
    console = domain.console()
    if args.boot:
        if args.boot == "warm":
            if console:
                remote.reboot(domain, console)
            else:
                console = remote.start(domain)
        elif args.boot == "cold":
            if console:
                remote.shutdown(domain, console)
            console = remote.start(domain)

    # Find a reason to log-in and interact with the console.
    batch = args.mode == "batch" or args.command is not None
    interactive = args.mode == "interactive" or (args.command is None
                                                 and args.boot is None
                                                 and not args.shutdown)

    if interactive or batch:

        # If the machine hasn't been booted, do so now.
        if not console:
            console = remote.start(domain)
        remote.login(domain, console)

        if args.chdir and os.path.isabs(args.chdir):
            chdir = args.chdir
        elif args.chdir:
            chdir = remote.directory(domain,
                                     console,
                                     directory=os.path.abspath(args.chdir))
        else:
            chdir = None
        if chdir:
            domain.logger.info("'cd' to %s", chdir)
            console.chdir(chdir)

        if args.command:

            console.output(args.output)
            console.run("")

            status = console.run(args.command, timeout=args.timeout)
            print()

        if interactive:

            print()
            output = console.output(None)
            if output:
                print(
                    "info: disabled --output as it makes pexpect crash when in interactive mode."
                )
            if args.debug:
                print("info: pexpect ignores --debug in interactive mode!")
            print("Escape character is ^]")
            # Hack so that the prompt appears
            console.output(sys.stdout)
            console.run("")
            console.output()
            # Normal mode
            console.stty_sane()
            console.interact()

    if args.shutdown:
        shutdown_status = remote.shutdown(domain)
        status = status or shutdown_status

    sys.exit(status)
Esempio n. 22
0
def main():

    parser = argparse.ArgumentParser(description="Connect to and run a shell command on a virtual machine domain",
                                     epilog=("If no command or file is specified an interactive shell is created."))

    parser.add_argument("--timeout", type=argutil.timeout, default=None,
                        help=("maximum runtime for the command"
                              "; -1 for no timeout"
                              " (default: no timeout)"))
    argutil.add_redirect_argument(parser, "re-direct console output from stdout to %(metavar)s",
                                  "--output", "-o",
                                  default=sys.stdout, metavar="FILE")

    parser.add_argument("--chdir", default=None, action="store", metavar="PATH",
                        help=("first change directory to %(metavar)s on the remote"
                              " domain and update prompt-match logic to expect"
                              " that directory"
                              "; an absolute %(metavar)s is used unmodified"
                              "; a relative  %(metavar)s, which is interpreted"
                              " as relative to the current local working directory"
                              ", is converted to an absolute remote path before use"
                              " (default: leave directory unchanged)"))
    parser.add_argument("--boot", default=None, action="store",
                        type=Boot, choices=[e for e in Boot],
                        help=("force the domain to boot"
                              "; 'cold': power-off any existing domain"
                              "; 'warm': reboot any existing domain"
                              " (default: leave existing domain running)"))
    parser.add_argument("--shutdown", default=False, action="store_true",
                        help=("on-completion shut down the domain"
                              " (default: leave the domain running)"))
    parser.add_argument("--mode", default=None,
                        choices=set(["interactive", "batch"]),
                        help=("enter mode"
                              " (default: if there is no command enter interactive mode)"))
    parser.add_argument("--host-name", default=None,
                        help="The virtual machine's host name")

    parser.add_argument("domain", action="store", metavar="DOMAIN",
                        help="virtual machine (domain) to connect to")

    parser.add_argument("command", nargs=argparse.REMAINDER, metavar="COMMAND",
                        help="run shell command non-interactively; WARNING#1: this simply concatenates remaining arguments with spaces; WARNING#2: this does not try to escape arguments before passing them onto the domain's shell")

    logutil.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmsh")

    # Get things started
    domain = virsh.Domain(domain_name=args.domain, host_name=args.host_name)

    # Find a reason to log-in and interact with the console.
    batch = args.mode == "batch" or args.command
    interactive = args.mode == "interactive" or (not args.command and args.boot is None and not args.shutdown)

    # Get the current console; this will be None if the machine is
    # shutoff.
    console = domain.console()
    if args.boot:
        if args.boot is Boot.cold and console:
            remote.shutdown(domain, console)
            console = None
        console = remote.boot_to_login_prompt(domain, console)
    elif (interactive or batch) and not console:
        console = remote.boot_to_login_prompt(domain, console)

    status = 0
    if interactive or batch:

        remote.login(domain, console)

        if args.chdir and os.path.isabs(args.chdir):
            chdir = args.chdir
        elif args.chdir:
            chdir = remote.directory(domain, console, directory=os.path.realpath(args.chdir))
        else:
            chdir = None
        if chdir:
            domain.logger.info("'cd' to %s", chdir)
            console.chdir(chdir)

        if args.command:

            console.output(args.output)
            console.run("")

            status = console.run(' '.join(args.command), timeout=args.timeout)
            print()

        if interactive:

            print()
            output = console.output(None)
            if output:
                logger.info("info: option --output disabled as it makes pexpect crash when in interactive mode.")
            if args.debug:
                logger.info("info: pexpect ignores --debug in interactive mode!")
            logger.info("Escape character is ^]")
            # Hack so that the prompt appears
            console.output(sys.stdout)
            console.run("")
            console.output()
            # Get this terminals properties.
            columns, rows = os.get_terminal_size()
            # Normal mode
            console.stty_sane(term=os.getenv("TERM"), rows=rows, columns=columns)
            console.interact()

    if args.shutdown:
        shutdown_status = remote.shutdown(domain)
        status = status or shutdown_status

    sys.exit(status)
Esempio n. 23
0
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a python word.
    parser.add_argument("--retry", type=int, metavar="COUNT", default=1,
                        help="which previously run tests should be retried: 0 selects not-started tests; 1 selects not-started+failed tests; -1 selects not-started+failed+passed tests (default is %(default)s)")
    parser.add_argument("--attempts", type=int, default=1,
                        help="number of times to attempt a test before giving up; default %(default)s")

    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    # Default to BACKUP under the current directory.  Name is
    # arbitrary, chosen for its hopefully unique first letter
    # (avoiding Makefile, OBJ, README, ... :-).
    parser.add_argument("--backup-directory", metavar="DIRECTORY", default="BACKUP",
                        help="backup existing <test>/OUTPUT to %(metavar)s/<date>/<test> (default: %(default)s)")

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="either a testsuite directory or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry)
    logger.info("  attempts: %s", args.attempts)
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  backup-directory: %s", args.backup_directory)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    start_time = datetime.now()
    exit_code = 0

    try:
        logger.info("run started at %s", start_time)

        test_count = 0
        for test in tests:

            test_stats.add(test, "total")
            test_count += 1
            # Would the number of tests to be [re]run be better?
            test_prefix = "****** %s (test %d of %d)" % (test.name, test_count, len(tests))

            ignore, details = testsuite.ignore(test, args)
            if ignore:
                result_stats.add_ignored(test, ignore)
                test_stats.add(test, "ignored")
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("%s: ignore (%s)", test_prefix, details)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.

            # Be lazy with gathering the results, don't run the
            # sanitizer or diff.
            old_result = post.mortem(test, args, skip_diff=True, skip_sanitize=True)
            if args.retry >= 0:
                if old_result:
                    if old_result.passed:
                        logger.info("%s: passed", test_prefix)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    if args.retry == 0:
                        logger.info("%s: %s (delete '%s' to re-test)", test_prefix,
                                    old_result, test.output_directory)
                        test_stats.add(test, "skipped")
                        result_stats.add_skipped(old_result)
                        continue
                    test_stats.add(test, "retry")

            logger.info("%s: starting ...", test_prefix)
            test_stats.add(test, "tests")

            # Move the contents of the existing OUTPUT directory to
            # BACKUP_DIRECTORY.  Do it file-by-file so that, at no
            # point, the directory is empty.
            #
            # By moving each test just before it is started a trail of
            # what tests were attempted at each run is left.
            #
            # XXX: During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # Always moving the directory contents to the
            # BACKUP_DIRECTORY mitigates this some.

            saved_output_directory = None
            if os.path.exists(test.output_directory):
                saved_output_directory = os.path.join(args.backup_directory,
                                                      start_time.strftime("%Y%m%d%H%M%S"),
                                                      test.name)
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, saved_output_directory)
                # Copy "empty" OUTPUT directories too.
                args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(saved_output_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    args.dry_run or os.replace(src, dst)
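                # (The "args.dry_run or <action>" idiom above relies
                # on short-circuit evaluation: the action only runs
                # when this is not a dry run.)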

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            for attempt in range(args.attempts):
                test_stats.add(test, "attempts")

                # Create the OUTPUT directory.
                try:
                    if not args.dry_run:
                        os.mkdir(test.output_directory)
                    elif os.path.exists(test.output_directory):
                        raise FileExistsError()
                except FileExistsError:
                    # On first attempt, the OUTPUT directory will
                    # be empty (see above) so no need to save.
                    if attempt > 0:
                        saved_output_directory = os.path.join(test.output_directory, str(attempt))
                        logger.info("moving contents of '%s' to '%s'",
                                    test.output_directory, saved_output_directory)
                        args.dry_run or os.makedirs(saved_output_directory, exist_ok=True)
                        for name in os.listdir(test.output_directory):
                            src = os.path.join(test.output_directory, name)
                            if os.path.isfile(src):
                                dst = os.path.join(saved_output_directory, name)
                                logger.debug("moving '%s' to '%s'", src, dst)
                                args.dry_run or os.replace(src, dst)

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test attempt.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, attempt+1, args.attempts, datetime.now())

                    if saved_output_directory:
                        logger.info("contents of '%s' moved to '%s'",
                                    test.output_directory, saved_output_directory)
                    saved_output_directory = None

                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, args)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "%s"\n' % result)
                    except pexpect.TIMEOUT as e:
                        logger.exception("**** test %s timed out ****", test.name)
                        ending = "timed-out"
                        # If the test has no output to check against, this will "pass"
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    test_stats.add(test, "attempts", ending, str(result))
                    if result.errors:
                        logger.info("****** test %s %s %s ******", test.name, result, result.errors)
                    else:
                        logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # Above will have set RESULT.  During a control-c or crash
            # the below will not be executed.

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)

            test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    end_time = datetime.now()
    logger.info("run finished at %s after %s", end_time, end_time - start_time)

    return exit_code
Esempio n. 24
0
def main():

    parser = argparse.ArgumentParser(description="list all tests in the form: <test> [ <directory> ] [ <result> <details...> ]")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--print-directory", action="store_true")
    parser.add_argument("--print-name", action="store_true")
    parser.add_argument("--print-result", action="store_true")

    parser.add_argument("--list-ignored", action="store_true",
                        help="include ignored tests in the list")
    parser.add_argument("--list-untested", action="store_true",
                        help="include untested tests in the list")

    parser.add_argument("directories", metavar="TEST-DIRECTORY", nargs="+",
                        help=("Either a testsuite (only one) or test directory"))
    # Note: this argument serves as documentation only.  The
    # TEST-DIRECTORY argument always consumes all remaining parameters.
    parser.add_argument("baseline", metavar="BASELINE-DIRECTORY", nargs="?",
                        help=("An optional testsuite directory containing"
                              " results from a previous test run"))
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short circuit for these; make
    # re-ordering easy by using V as a counter.
    v = 0
    args.print_directory = args.print_directory or args.verbose > v
    args.print_name = args.print_name or args.verbose > v
    v += 1
    args.list_untested = args.list_untested or args.verbose > v
    v += 1
    args.list_ignored = args.list_ignored or args.verbose > v
    v += 1

    # By default print the relative directory path.
    if not args.print_directory and not args.print_name:
        args.print_directory = True

    # If there is more than one directory then the last might be the
    # baseline.  Try loading it as a testsuite (baselines are
    # testsuites) to see if that is the case.
    basetests = None
    tests = None
    if len(args.directories) > 1:
        # Perhaps the last argument is the baseline?
        basetests = testsuite.load(logger, args.directories[-1])
        if basetests:
            logger.debug("basetests loaded from '%s'", basetests.directory)
            args.directories.pop()
    tests = testsuite.load_testsuite_or_tests(logger, args.directories)
    logger.debug("basetests=%s", basetests)
    logger.debug("tests=%s", tests)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    # When an explicit list of directories was specified always print
    # all of them (otherwise, tests seem to get lost).
    if isinstance(tests, list):
        args.list_untested = True

    # Preload the baseline.  This avoids re-scanning the TESTLIST and,
    # when errors, printing those repeatedly.  Also, passing the full
    # baseline to Test.results() lets that function differentiate
    # between a baseline missing results or being entirely absent.
    baseline = None
    if basetests:
        baseline = {}
        for test in basetests:
            baseline[test.name] = test

    for test in tests:

        # Filter out tests that are being ignored?
        ignore = testsuite.ignore(test, args)
        if ignore and not args.list_ignored:
            continue

        # Filter out tests that have not been run?
        result = None
        if not ignore:
            result = test.result(baseline)
            if not result and not args.list_untested:
                continue

        sep = ""

        if args.print_name:
            print(sep, end="")
            print(test.name, end="")
            sep = " "

        if args.print_directory:
            print(sep, end="")
            print(test.directory, end="")
            sep = " "

        if ignore:
            print(sep, end="")
            print("ignored", ignore, end="")
            sep = " "

        if result:
            print(sep, end="")
            print(result, end="")
            sep = " "

        print()

        sys.stdout.flush()

    return 0
Esempio n. 25
0
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)
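    # (e.g. "kill -USR1 <pid>" then makes faulthandler dump every
    # thread's stack to stderr.)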

    parser = argparse.ArgumentParser(description="Connect to and run a shell command on a virtual machine domain",
                                     epilog="If no command or file is specified an interactive shell is created.  SIGUSR1 will dump all thread stacks")

    parser.add_argument("--timeout", type=argutil.timeout, default=None,
                        help=("maximum runtime for the command"
                              "; -1 for no timeout"
                              " (default: no timeout)"))
    argutil.add_redirect_argument(parser, "re-direct console output from stdout to %(metavar)s",
                                  "--output", "-o",
                                  default=sys.stdout, metavar="FILE")

    parser.add_argument("--chdir", default=None, action="store", metavar="PATH",
                        help=("first change directory to %(metavar)s on the remote"
                              " domain and update prompt-match logic to expect"
                              " that directory"
                              "; an absolute %(metavar)s is used unmodified"
                              "; a relative  %(metavar)s, which is interpreted"
                              " as relative to the current local working directory"
                              ", is converted to an absolute remote path before use"
                              " (default: leave directory unchanged)"))
    parser.add_argument("--boot", default=None, action="store",
                        type=Boot, choices=[e for e in Boot],
                        help=("force the domain to boot"
                              "; 'cold': power-off any existing domain"
                              "; 'warm': reboot any existing domain"
                              " (default: leave existing domain running)"))
    parser.add_argument("--shutdown", default=False, action="store_true",
                        help=("on-completion shut down the domain"
                              " (default: leave the domain running)"))
    parser.add_argument("--mode", default=None,
                        choices=set(["interactive", "batch"]),
                        help=("enter mode"
                              " (default: if there is no command enter interactive mode)"))
    parser.add_argument("--host-name", default=None,
                        help="The virtual machine's host name")

    parser.add_argument("domain", action="store", metavar="DOMAIN",
                        help="virtual machine (domain) to connect to")

    parser.add_argument("command", nargs=argparse.REMAINDER, metavar="COMMAND",
                        help="run shell command non-interactively; WARNING#1: this simply concatenates remaining arguments with spaces; WARNING#2: this does not try to escape arguments before passing them onto the domain's shell")

    logutil.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmsh")

    # Get things started
    domain = virsh.Domain(domain_name=args.domain, host_name=args.host_name)

    # Find a reason to log-in and interact with the console.
    batch = args.mode == "batch" or args.command
    interactive = args.mode == "interactive" or (not args.command and args.boot is None and not args.shutdown)

    # Get the current console; this will be None if the machine is
    # shutoff (and forced to None for a cold boot)

    console = domain.console()
    if args.boot is Boot.cold and console:
        remote.shutdown(domain, console)
        console = None

    status = 0
    if args.boot and not (interactive or batch):
        console = remote.boot_to_login_prompt(domain, console)

    elif interactive or batch:
        if console:
            remote.login(domain, console)
        else:
            console = remote.boot_and_login(domain, console)

        if args.chdir and os.path.isabs(args.chdir):
            chdir = args.chdir
        elif args.chdir:
            chdir = domain.guest_path(console, host_path=args.chdir)
        else:
            chdir = None
        if chdir:
            domain.logger.info("'cd' to %s", chdir)
            console.chdir(chdir)

        if args.command:

            if interactive:
                logger.info("info: option --output disabled as it makes pexpect crash when in interactive mode.")
            else:
                console.redirect_output(args.output)
            console.run("")

            status = console.run(' '.join(args.command), timeout=args.timeout)
            print()

        if interactive:

            print()
            if args.debug:
                logger.info("info: pexpect ignores --debug in interactive mode!")
            logger.info("Escape character is ^]")
            # Hack so that the prompt appears
            console.redirect_output(sys.stdout)
            console.run("")
            console.redirect_output(None)
            # Get this terminals properties.
            columns, rows = os.get_terminal_size()
            # Normal mode
            console.stty_sane(term=os.getenv("TERM"), rows=rows, columns=columns)
            console.interact()

    if args.shutdown:
        shutdown_status = remote.shutdown(domain)
        status = status or shutdown_status

    sys.exit(status)
Esempio n. 26
0
def main():
    parser = argparse.ArgumentParser(description="Run tests")

    # This argument's behaviour is overloaded; the shorter word "try"
    # is a python word.
    parser.add_argument("--retry", type=int, metavar="COUNT",
                        help=("number of times a test should be attempted before giving up"
                              " (tests are categorised as not-started (no OUTPUT directory), incomplete, failed, passed)"
                              "; a negative %(metavar)s selects all tests"
                              "; a zero %(metavar)s selects not-started tests"
                              "; a positive %(metavar)s selects not-started, incomplete and failing tests"
                              "; default is to select not-started tests"))
    parser.add_argument("--dry-run", "-n", action="store_true")
    parser.add_argument("--verbose", "-v", action="count", default=0)
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help=("Either a testsuite directory or"
                              " a list of test directories"))
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    post.add_arguments(parser)
    logutil.add_arguments(parser)

    args = parser.parse_args()
    logutil.config(args)

    logger = logutil.getLogger("kvmrunner")
    logger.info("Options:")
    logger.info("  retry: %s", args.retry or "0 (default)")
    logger.info("  dry-run: %s", args.dry_run)
    logger.info("  directories: %s", args.directories)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    post.log_arguments(logger, args)
    logutil.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directory)
        return 1

    # If a list of test directories was specified (i.e., not a
    # testsuite), force the tests to run.
    if isinstance(tests, list) and args.retry is None:
        args.retry = 1
        logger.info("Explicit directory list; forcing --retry=%d (retry failed tests)", args.retry)

    # Use a default dict so no need to worry about initializing values
    # to zero.
    stats = Stats()
    results = Results()
    start_time = time.localtime()

    try:
        logger.info("run started at %s", datetime.now())

        for test in tests:
            stats.add("total", test)

            ignore = testsuite.ignore(test, args)
            if ignore:
                stats.add("ignored", test)
                # No need to log all the ignored tests when an
                # explicit sub-set of tests is being run.  For
                # instance, when running just one test.
                if not args.test_name:
                    logger.info("*** %s: ignore (%s)", test.name, ignore)
                continue

            # Implement "--retry" as described above: if retry is -ve,
            # the test is always run; if there's no result, the test
            # is always run; skip passed tests; else things get a
            # little weird.
            retry = args.retry or 0
            if retry >= 0:
                result = post.mortem(test, args)
                if result:
                    if result.passed:
                        logger.info("*** %s: passed", test.name)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
                    if retry == 0:
                        logger.info("*** %s: %s (delete '%s' to re-test)", test.name,
                                    result.value, test.output_directory)
                        stats.add("skipped", test)
                        results.add(result)
                        continue
            stats.add("tests", test)

            debugfile = None
            result = None

            # At least one iteration; above will have filtered out
            # skips and ignores
            runs = max(abs(retry), 1)
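            # (e.g. --retry -3 yields three runs; --retry 0 or 1
            # yields a single run.)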
            for run in range(runs):
                stats.add("runs", test)

                # Create an output directory.  If there's already an
                # existing OUTPUT directory rename it to OUTPUT...
                # Need to do this before the OUTPUT/debug.log is
                # started as otherwise it too would get moved away.
                saved_output_directory = None
                if not args.dry_run:
                    try:
                        os.mkdir(test.output_directory)
                    except FileExistsError:
                        stats.add("reruns", test)
                        # Include the time this test run started in
                        # the suffix - that way all saved results can
                        # be matched using a wild card.  Include the
                        # time the directory was last modified in the
                        # suffix - it makes a good approximation as to
                        # when the previous test run finished.
                        stat = os.stat(test.output_directory)
                        mtime = time.localtime(stat.st_mtime)
                        saved_output_directory = (test.output_directory
                                                + time.strftime(".%Y%m%d%H%M", start_time)
                                                + time.strftime(".%H%M%S", mtime))
                        logger.debug("renaming '%s' to '%s'",
                                     test.output_directory, saved_output_directory)
                        os.rename(test.output_directory, saved_output_directory)
                        # if the second attempt fails, let it crash
                        os.mkdir(test.output_directory)
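                        # With hypothetical timestamps, a stale OUTPUT
                        # is saved as something like
                        # OUTPUT.202101021530.143059 (run start time,
                        # then the old directory's mtime).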

                # Start a debug log in the OUTPUT directory; include
                # timing for this specific test run.
                with logutil.TIMER, logutil.Debug(logger, os.path.join(test.output_directory, "debug.log")):
                    logger.info("****** test %s attempt %d of %d started at %s ******",
                                test.name, run+1, runs, datetime.now())
                    # Add a log message about any saved output
                    # directory to the per-test-run debug log.  It
                    # just looks better.
                    if saved_output_directory:
                        logger.info("existing OUTPUT saved in '%s'",
                                    saved_output_directory)
                    ending = "undefined"
                    try:
                        if not args.dry_run:
                            runner.run_test(test, max_workers=args.workers)
                        ending = "finished"
                        result = post.mortem(test, args, update=(not args.dry_run))
                        if not args.dry_run:
                            # Store enough to fool the script
                            # pluto-testlist-scan.sh.
                            logger.info("storing result in '%s'", test.result_file)
                            with open(test.result_file, "w") as f:
                                f.write('"result": "')
                                f.write(result.value)
                                f.write('"')
                                f.write("\n")
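                                # The result file now holds a single
                                # line such as: "result": "passed"
                                # (just enough for the scan script
                                # named above to parse).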
                    except pexpect.TIMEOUT:
                        ending = "timeout"
                        logger.exception("**** test %s timed out ****", test.name)
                        result = post.mortem(test, args, update=(not args.dry_run))
                    # Since the OUTPUT directory exists, all paths to
                    # here should have a non-null RESULT.
                    stats.add("runs(%s:%s)" % (ending, result.value), test)
                    logger.info("****** test %s %s ******", test.name, result)
                    if result.passed:
                        break

            # The above will have set RESULT (this point isn't reached
            # after a ctrl-c or a crash).
            results.add(result)
            stats.add("tests(%s)" % result.value, test)

    except KeyboardInterrupt:
        logger.exception("**** test %s interrupted ****", test.name)
        return 1

    finally:
        logger.info("run finished at %s", datetime.now())

        level = logutil.INFO if args.verbose else logutil.DEBUG
        logger.log(level, "stat details:")
        stats.log_details(logger, level=level, prefix="  ")

        logger.info("result details:")
        results.log_details(logger, level=logutil.INFO, prefix="  ")

        logger.info("stat summary:")
        stats.log_summary(logger, level=logutil.INFO, prefix="  ")
        logger.info("result summary:")
        results.log_summary(logger, level=logutil.INFO, prefix="  ")

    return 0
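The rename-aside logic buried in the loop above is worth isolating.  Below is a minimal, standalone sketch of the same pattern, assuming only the standard library; the function name fresh_output_directory is hypothetical and not part of the tool.

import os
import time

def fresh_output_directory(path, start_time):
    """Create PATH; if it already exists, rename it aside first.

    The suffix combines this run's start time with the old
    directory's mtime, so all results saved by one run share a
    common, wildcard-friendly prefix.
    """
    try:
        os.mkdir(path)
        return None
    except FileExistsError:
        mtime = time.localtime(os.stat(path).st_mtime)
        saved = (path
                 + time.strftime(".%Y%m%d%H%M", start_time)
                 + time.strftime(".%H%M%S", mtime))
        os.rename(path, saved)
        # If this second attempt fails, let it crash.
        os.mkdir(path)
        return saved

# Example use (hypothetical path):
#   saved = fresh_output_directory("OUTPUT", time.localtime())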
Example n. 27
def main():

    parser = argparse.ArgumentParser(
        description="list test results",
        epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minute test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately)."
    )
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument(
        "--quick",
        action="store_true",
        help=
        ("Use the previously generated '.console.txt' and '.console.diff' files"
         ))

    parser.add_argument(
        "--update",
        action="store_true",
        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument(
        "--print",
        action="store",
        default=Print(Print.path, Print.result, Print.issues),
        type=Print,
        metavar=str(Print),
        help="comma-separated list of attributes to print for each test; default: '%(default)s'"
    )

    parser.add_argument(
        "--stats",
        action="store",
        default=Stats.summary,
        type=Stats,
        choices=[c for c in Stats],
        help="provide overview statistics; default: \"%(default)s\"")

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results is displayed"
    parser.add_argument("--baseline",
                        "-b",
                        metavar=baseline_metavar,
                        help=baseline_help)

    parser.add_argument(
        "--json",
        action="store_true",
        help="output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well-formed json list)"
    )

    parser.add_argument(
        "directories",
        metavar="DIRECTORY-OR-FILE",
        nargs="+",
        help=
        "a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'"
    )

    # Note: this argument serves as documentation only.  The RESULT
    # argument should consume all remaining parameters.
    parser.add_argument("baseline_ignored",
                        nargs="?",
                        metavar=baseline_metavar,
                        help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short-circuit for these; using V as a
    # counter makes re-ordering easy.
    v = 0

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Baseline: %s", args.baseline)
        logger.info("  Json: %s", args.json)
        logger.info("  Quick: %s", args.quick)
        logger.info("  Update: %s", args.update)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite was specified, so loading can
        # be more forgiving.
        baseline = testsuite.load(logger,
                                  logutil.DEBUG,
                                  args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output, magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger,
                                      logutil.DEBUG,
                                      args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger,
                                  logutil.DEBUG,
                                  args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'",
                         args.directories[-1])
            args.directories.pop()
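    # At this point BASELINE is one of: a testsuite loaded via
    # --baseline; a testsuite magicked up from --baseline's output; a
    # testsuite loaded from the trailing directory argument; or None
    # (no baseline comparison).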

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log,
                                     header="Details:",
                                     prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log,
                                     header="Summary:",
                                     prefix="  ")

    return 0
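The --json flag above emits one JSON object per result and relies on 'jq -s .' to aggregate them into a list.  As a minimal sketch, assuming only standard-library Python, the same aggregation can be done without jq (the script name aggregate.py is hypothetical):

import json
import sys

def read_json_objects(stream):
    """Yield each JSON object from a stream of concatenated objects."""
    decoder = json.JSONDecoder()
    text = stream.read()
    index = 0
    while index < len(text):
        # Skip any whitespace separating the objects.
        while index < len(text) and text[index].isspace():
            index += 1
        if index >= len(text):
            break
        obj, index = decoder.raw_decode(text, index)
        yield obj

# Example: kvmresults --json ... | python3 aggregate.py > results.json
if __name__ == "__main__":
    json.dump(list(read_json_objects(sys.stdin)), sys.stdout, indent=2)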
Example n. 28
def main():

    parser = argparse.ArgumentParser(description="write 'table.json' to standard output")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))
    parser.add_argument("--quick-sanitize", action="store_true",
                        help=("Use the previously generated '.console.txt' file"))
    parser.add_argument("--quick-diff", action="store_true",
                        help=("Use the previously generated '.console.diff' file"))

    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))
    parser.add_argument("--update-sanitize", action="store_true",
                        help=("Update the '.console.txt' file"))
    parser.add_argument("--update-diff", action="store_true",
                        help=("Update the '.console.diff' file"))

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="%(metavar)s containing: a test, a testsuite (contains a TESTLIST file), a TESTLIST file, test output, or testsuite output")

    post.add_arguments(parser)
    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)

    args = parser.parse_args()

    logutil.config(args)
    logger = logutil.getLogger("kvmresults")

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    columns = [ "Test", "Expected", "Result", "Run time", "Responder", "..." ]
    rows = []

    for test in tests:

        sys.stderr.write("%s\n" % test.name)

        # Filter out tests that are being ignored?
        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored:
            if not include_ignored:
                continue

        # Filter out tests that have not been run
        result = None
        if not ignored:
            result = post.mortem(test, args, baseline=None,
                                 output_directory=test.saved_output_directory,
                                 test_finished=None,
                                 skip_sanitize=args.quick or args.quick_sanitize,
                                 skip_diff=args.quick or args.quick_diff,
                                 update=args.update,
                                 update_sanitize=args.update_sanitize,
                                 update_diff=args.update_diff)
            if skip.result(logger, args, result):
                continue

        row = [
            test.name,
            test.expected_result, str(result),
            "run-time"
        ]
        for host in sorted(test.host_names):
            errors = result.errors.errors
            if host in errors:
                row.append("%s %s" % (host, " ".join(sorted(errors[host]))))
            else:
                row.append("%s passed" % (host))

        rows.append(row)

    summary = {
        "Total": 0,
        "passed": 0,
        "failed": 0,
        "abort": 0,
        "missing baseline": 0,
        "missing console output": 0,
        "missing OUTPUT": 0,
        "missing RESULT": 0,
        "ASSERT": 0,
        "CORE": 0,
        "EXPECT": 0,
        "GPFAULT": 0,
        "SEGFAULT": 0,
        "date": "0000-00-00",
        "dir": "testing/pluto",
        "runtime": 0,
        "runtime_str": "00:00:00",
    }

    table = {
        "suffix": "/OUTPUT",
        "summary": summary,
        "columns": columns,
        "rows": rows,
        "runDir": "???",
    }
    print(json.dumps(table, indent=2))
    return 0
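Since the table is printed as JSON, a consumer only needs json.load.  A minimal sketch, assuming the output was redirected to a file named table.json (the tool itself writes to standard output):

import json

with open("table.json") as f:
    table = json.load(f)

# Per the columns list above: row[0] is the test name, row[1] the
# expected result, row[2] the actual result.
for row in table["rows"]:
    print("%s: expected %s, got %s" % (row[0], row[1], row[2]))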