Code example #1
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                        post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(
                sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
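
A quick note on the builder selection near the end of this example: "b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)" uses the old and/or ternary idiom, which is safe here only because the builder instances are always truthy; the equivalent conditional expression would be "printer.JsonBuilder(sys.stdout) if args.json else printer.TextBuilder(sys.stdout)". A tiny self-contained demo of why the conditional expression is the safer spelling:

# The and/or chain silently falls through when the "true" branch is falsy;
# the conditional expression does not.
flag = True
print(flag and 0 or 1)   # prints 1, even though flag is True
print(0 if flag else 1)  # prints 0, as intended
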
Code example #2
File: kvmresults.py  Project: lzbgt/libreswan
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick,
                                 update=args.update)
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            # If there is a baseline; limit what is printed to just
            # those that differ.
            if baseline:
                baseline_issue = False
                for issue in result.issues:
                    if "baseline" in issue:
                        baseline_issue = True
                        break
                if not baseline_issue:
                    continue
            b = args.json and printer.JsonBuilder(
                sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
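
Compared with examples #1 and #3, this variant detects baseline differences by scanning the issue strings with a flag-and-break loop rather than by testing the post.Issues sets. The loop collapses to a single any() expression; a minimal self-contained sketch (the issue strings here are made up for illustration):

issues = ["baseline-failed", "output-different"]
baseline_issue = any("baseline" in issue for issue in issues)
print(baseline_issue)  # True
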
Code example #3
File: kvmresults.py  Project: libreswan/libreswan
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
Code example #4
File: runner.py  Project: Purple-wind-bell/libreswan
def run_tests(logger, args, tests, result_stats):
    if args.workers == 1:
        logger.info("using 1 worker thread to reboot domains")
    else:
        logger.info("using a pool of %s worker threads to reboot domains", args.workers)
    with futures.ThreadPoolExecutor(max_workers=args.workers) as boot_executor:
        domain_prefixes = args.prefix or [""]
        if args.parallel or len(domain_prefixes) > 1:
            logger.info("using the parallel test processor and domain prefixes %s", domain_prefixes)
            _parallel_test_processor(domain_prefixes, tests, args, result_stats, boot_executor, logger)
        else:
            domain_prefix = domain_prefixes[0]
            logger.info("using the serial test processor and domain prefix '%s'", domain_prefix)
            _serial_test_processor(domain_prefix, tests, args, result_stats, boot_executor, logger)
    publish.json_status(logger, args, "finished")
Code example #5
File: runner.py  Project: libreswan/libreswan
def run_tests(logger, args, tests, test_stats, result_stats):
    if args.workers == 1:
        logger.info("using 1 worker thread to reboot domains")
    else:
        logger.info("using a pool of %s worker threads to reboot domains", args.workers)
    with futures.ThreadPoolExecutor(max_workers=args.workers) as boot_executor:
        domain_prefixes = args.prefix or [""]
        if args.parallel or len(domain_prefixes) > 1:
            logger.info("using the parallel test processor and domain prefixes %s", domain_prefixes)
            _parallel_test_processor(domain_prefixes, tests, args, test_stats, result_stats, boot_executor, logger)
        else:
            domain_prefix = domain_prefixes[0]
            logger.info("using the serial test processor and domain prefix '%s'", domain_prefix)
            _serial_test_processor(domain_prefix, tests, args, test_stats, result_stats, boot_executor, logger)
    publish.json_status(logger, args, "finished")
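
Both run_tests() variants hand the same ThreadPoolExecutor to the test processors as boot_executor so that domain reboots can be overlapped. The pool itself is plain concurrent.futures; a minimal self-contained sketch of the submit/collect pattern such a pool supports (reboot_domain and the domain names are hypothetical stand-ins, not taken from the examples):

from concurrent import futures

def reboot_domain(name):
    # Hypothetical placeholder for the real per-domain reboot work.
    return "%s rebooted" % name

domains = ["east", "west", "nic"]  # hypothetical domain names
with futures.ThreadPoolExecutor(max_workers=2) as boot_executor:
    jobs = [boot_executor.submit(reboot_domain, name) for name in domains]
    for job in futures.as_completed(jobs):
        print(job.result())
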
Code example #6
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # always perform post mortem on the test directory.
        try:

            ignored, details = ignore.test(logger, args, test)
            if ignored:
                # If there is any pre-existing output move it to
                # backup.  Otherwise it looks like the test was run
                # when it wasn't (and besides, the output is no longer
                # applicable).
                #
                # The isdir() test followed by a simple move, while
                # racy, should be good enough.
                if os.path.isdir(test.output_directory):
                    logger.info("moving '%s' to '%s'", test.output_directory,
                                backup_directory)
                    os.makedirs(os.path.dirname(backup_directory),
                                exist_ok=True)
                    os.rename(test.output_directory, backup_directory)
                result_stats.add_ignored(test, ignored)
                test_stats.add(test, "ignored")
                logger.info("%s %s ignored (%s) %s", prefix, test_prefix,
                            details, suffix)
                return

            # Be lazy when gathering the results, don't run the
            # sanitizer or diff.  Let post.mortem figure out if the
            # test finished.

            old_result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     quick=True)
            if skip.result(logger, args, old_result):
                logger.info("%s %s skipped (previously %s) %s", prefix,
                            test_prefix, old_result, suffix)
                test_stats.add(test, "skipped")
                result_stats.add_skipped(old_result)
                return

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory; if it already exists,
            # move any contents to BACKUP/.  Do it file-by-file so
            # that, at no point, the OUTPUT/ directory is missing
            # (having an OUTPUT/ directory implies the test was
            # started).
            #
            # Don't create the path.  If the parent directory is
            # missing, this will fail.
            #
            # By backing up each test just before it is started, a
            # trail of what tests were attempted during each run is
            # created.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileNotFoundError:
                # Bail, something is messed up (for instance the parent directory doesn't exist).
                return
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        logger.exception("timeout while booting domains")
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        return

                # Run the scripts directly
                with logger.time(
                        "running scripts %s",
                        " ".join(("%s:%s" % (host, script)) for host, script in
                                 test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s",
                                        script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write(
                                        "\n*** exception running script %s ***\n%s"
                                        % (script, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(
                                    post.DONE)

                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout gets
                            # treated as unresolved.  Timeouts
                            # shouldn't occur so human intervention
                            # is required.
                            logger.error(
                                "**** timeout out while running test script %s ****",
                                script)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     update=True)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.

            if not os.path.isfile(test.result_file()) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname:
                    test.name,
                    jsonutil.result.expect:
                    test.status,
                    jsonutil.result.result:
                    result,
                    jsonutil.result.issues:
                    result.issues,
                    jsonutil.result.hosts:
                    test.host_names,
                    jsonutil.result.time:
                    jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime:
                    round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time:
                    round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time:
                    round(test_script_time.seconds(), 1),
                    jsonutil.result.total_time:
                    round(test_runtime.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", test.result_file(),
                             j)
                with open(test.result_file(), "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Code example #7
File: runner.py  Project: Telmate/libreswan
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run; remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details,
                        suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test,
                                 args,
                                 domain_prefix=domain_prefix,
                                 quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run so need to perform post
        # mortem.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, copy the contents
            # to BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty like deleted the parent
            # directory.
            #
            # By backing up each test just before it is started,
            # leaves a trail of what tests were attempted during a
            # test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("TIMEOUT while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("EOF while booting domains")
                        return
                    except:
                        logger.exception("EXCEPTION while booting domains")
                        raise

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 test.host_scripts) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.redirect_output(
                                    open(output, "w"))

                            # If a script times out, don't try to run
                            # post-mortem.sh.
                            host_timed_out = None

                            for script in test.host_scripts:
                                test_domain = test_domains[script.host_name]
                                try:
                                    test_domain.read_file_run(script.path)
                                except pexpect.TIMEOUT as e:
                                    # A timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    message = "%s while running script %s" % (
                                        post.Issues.TIMEOUT, script)
                                    logger.warning("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except pexpect.EOF as e:
                                    # An EOF while a script is running
                                    # is a sign that libvirt crashed.
                                    message = "%s while running script %s" % (
                                        post.Issues.EOF, script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    message = "%s %s while running script %s" % (
                                        post.Issues.EXCEPTION, str(e), script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "\n%s %s %s\n", post.LHS, message,
                                        post.RHS)
                                    raise

                            if args.run_post_mortem is False:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- disabled +++"
                                )
                            elif host_timed_out:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- %s timed out +++"
                                    % (host_timed_out))
                            else:  # None or True
                                script = "../bin/post-mortem.sh"
                                for host_name in test.host_names:
                                    test_domain = test_domains[host_name]
                                    test_domain.console.append_output(
                                        "%s post-mortem %s", post.LHS,
                                        post.LHS)
                                    logger.info("running %s on %s", script,
                                                host_name)
                                    try:
                                        status = test_domain.console.run(
                                            script)
                                        if status:
                                            logger.error(
                                                "%s failed on %s with status %s",
                                                script, host_name, status)
                                        else:
                                            test_domain.console.append_output(
                                                "%s post-mortem %s", post.RHS,
                                                post.RHS)
                                    except pexpect.TIMEOUT as e:
                                        # A post-mortem ending with a
                                        # TIMEOUT gets treated as a
                                        # FAIL.
                                        message = "%s while running script %s" % (
                                            post.Issues.TIMEOUT, script)
                                        logger.warning("*** %s ***" % message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except pexpect.EOF as e:
                                        # A post-mortem ending with an
                                        # EOF gets treated as
                                        # unresolved.
                                        message = "%s while running script %s" % (
                                            post.Issues.EOF, script)
                                        logger.exception("*** %s ***" %
                                                         message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except BaseException as e:
                                        # if there is an exception, write
                                        # it to the console
                                        message = "%s %s while running script %s" % (
                                            post.Issues.EXCEPTION, str(e),
                                            script)
                                        logger.exception(message)
                                        test_domain.console.append_output(
                                            "\n%s %s %s\n", post.LHS, message,
                                            post.RHS)
                                        raise

                            for test_domain in test_domains.values():
                                test_domain.console.append_output(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                test_domain.console.close_output()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname:
                    test.name,
                    jsonutil.result.expect:
                    test.status,
                    jsonutil.result.result:
                    result,
                    jsonutil.result.issues:
                    result.issues,
                    jsonutil.result.hosts:
                    test.host_names,
                    jsonutil.result.time:
                    jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime:
                    round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time:
                    round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time:
                    round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Code example #8
def results(logger, tests, baseline, args, result_stats):

    failures = 0
    unresolved = 0
    passed = 0
    nr = 0

    for test in tests:

        nr = nr + 1
        publish.json_status(logger, args,
                            "rebuilding %s (test %d of %d)" % (test.name, nr, len(tests)))

        # If debug logging is enabled this will provide fine grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing static test value such
            # as the test's name takes far longer than one would
            # expect.
            result = post.mortem(test, args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if printer.Print.RESULT in args.print \
                and skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            if result.resolution in [post.Resolution.PASSED,
                                     post.Resolution.UNTESTED,
                                     post.Resolution.UNSUPPORTED]:
                passed = passed + 1
            elif result.resolution in [post.Resolution.UNRESOLVED]:
                unresolved = unresolved + 1
            else:
                failures = failures + 1

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and test in baseline \
               and not result.issues.crashed():
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED, post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = args.json and printer.JsonBuilder(sys.stdout) or printer.TextBuilder(sys.stdout)
            printer.build_result(logger, result, baseline, args, args.print, b)

        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    publish.json_status(logger, args, "finished")

    # exit code
    if args.exit_ok:
        return 0
    elif unresolved:
        return 125 # 'git bisect' magic for don't know
    elif failures:
        return 1
    else:
        return 0
Code example #9
File: runner.py  Project: libreswan/libreswan
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run; remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run so need to perform post
        # mortem.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....",
                            prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, copy the contents
            # to BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty like deleted the parent
            # directory.
            #
            # By backing up each test just before it is started,
            # leaves a trail of what tests were attempted during a
            # test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/ this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(test.output_directory,
                                                        "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("timeout while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("eof (disconnect) while booting domains")
                        return

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 " ".join(("%s:%s" % (host, script))
                                          for host, script in test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                      [test_domain.domain for test_domain in test_domains.values()],
                                      enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(test.output_directory,
                                                      test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except pexpect.TIMEOUT as e:
                                    # A test ending with a timeout
                                    # gets treated as a FAIL.  A
                                    # timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    post_timeout = "%s %s:%s" % (post.TIMEOUT, host, script)
                                    logger.warning("*** %s ***" % post_timeout)
                                    test_domain.console.child.logfile.write("%s %s %s" % (post.LHS, post_timeout, post.RHS))
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write("\n%s %s %s:%s %rhs\n%s" % (post.LHS, post.EXCEPTION, host, script, post.RHS, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info("closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")