Example 1
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run; remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details,
                        suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test,
                                 args,
                                 domain_prefix=domain_prefix,
                                 quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run, so a post-mortem will
        # need to be performed no matter what.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, move the contents
            # to BACKUP/.  Do it file-by-file so that, at no point,
            # the OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the full path.  If the parent
            # directory is missing, this and the entire script will
            # crash; someone did something nasty such as deleting
            # the parent directory.
            #
            # Backing up each test just before it is started leaves
            # a trail of which tests were attempted during a test
            # run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/, this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("TIMEOUT while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("EOF while booting domains")
                        return
                    except:
                        logger.exception("EXCEPTION while booting domains")
                        raise

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 test.host_scripts) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.redirect_output(
                                    open(output, "w"))

                            # If a script times out, don't try to run
                            # post-mortem.sh.
                            host_timed_out = None

                            for script in test.host_scripts:
                                test_domain = test_domains[script.host_name]
                                try:
                                    test_domain.read_file_run(script.path)
                                except pexpect.TIMEOUT as e:
                                    # A timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    message = "%s while running script %s" % (
                                        post.Issues.TIMEOUT, script)
                                    logger.warning("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except pexpect.EOF as e:
                                    # An EOF while a script is running
                                    # is a sign that libvirt crashed.
                                    message = "%s while running script %s" % (
                                        post.Issues.EOF, script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "%s %s %s", post.LHS, message,
                                        post.RHS)
                                    host_timed_out = script.host_name
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    message = "%s %s while running script %s" % (
                                        post.Issues.EXCEPTION, str(e), script)
                                    logger.exception("*** %s ***" % message)
                                    test_domain.console.append_output(
                                        "\n%s %s %s\n", post.LHS, message,
                                        post.RHS)
                                    raise

                            if args.run_post_mortem is False:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- disabled +++"
                                )
                            elif host_timed_out:
                                logger.warning(
                                    "+++ skipping script post-mortem.sh -- %s timed out +++"
                                    % (host_timed_out))
                            else:  # None or True
                                script = "../bin/post-mortem.sh"
                                for host_name in test.host_names:
                                    test_domain = test_domains[host_name]
                                    test_domain.console.append_output(
                                        "%s post-mortem %s", post.LHS,
                                        post.LHS)
                                    logger.info("running %s on %s", script,
                                                host_name)
                                    try:
                                        status = test_domain.console.run(
                                            script)
                                        if status:
                                            logger.error(
                                                "%s failed on %s with status %s",
                                                script, host_name, status)
                                        else:
                                            test_domain.console.append_output(
                                                "%s post-mortem %s", post.RHS,
                                                post.RHS)
                                    except pexpect.TIMEOUT as e:
                                        # A post-mortem ending with a
                                        # TIMEOUT gets treated as a
                                        # FAIL.
                                        message = "%s while running script %s" % (
                                            post.Issues.TIMEOUT, script)
                                        logger.warning("*** %s ***" % message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except pexpect.EOF as e:
                                        # A post-mortem ending with an
                                        # EOF gets treated as
                                        # unresolved.
                                        message = "%s while running script %s" % (
                                            post.Issues.EOF, script)
                                        logger.exception("*** %s ***" %
                                                         message)
                                        test_domain.console.append_output(
                                            "%s %s %s", post.LHS, message,
                                            post.RHS)
                                        continue  # always teardown
                                    except BaseException as e:
                                        # if there is an exception, write
                                        # it to the console
                                        message = "%s %s while running script %s" % (
                                            post.Issues.EXCEPTION, str(e),
                                            script)
                                        logger.exception(message)
                                        test_domain.console.append_output(
                                            "\n%s %s %s\n", post.LHS, message,
                                            post.RHS)
                                        raise

                            for test_domain in test_domains.values():
                                test_domain.console.append_output(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                test_domain.console.close_output()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed out or crashed isn't considered
            # resolved, so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if not os.path.isfile(result_file) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Example 2
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # always perform post mortem on the test directory.
        try:

            ignored, details = ignore.test(logger, args, test)
            if ignored:
                # If there is any pre-existing output move it to
                # backup.  Otherwise it looks like the test was run
                # when it wasn't (and besides, the output is no longer
                # applicable).
                #
                # The isdir() test followed by a simple move, while
                # racy, should be good enough.
                if os.path.isdir(test.output_directory):
                    logger.info("moving '%s' to '%s'", test.output_directory,
                                backup_directory)
                    os.makedirs(os.path.dirname(backup_directory),
                                exist_ok=True)
                    os.rename(test.output_directory, backup_directory)
                result_stats.add_ignored(test, ignored)
                test_stats.add(test, "ignored")
                logger.info("%s %s ignored (%s) %s", prefix, test_prefix,
                            details, suffix)
                return

            # Be lazy when gathering the results: don't run the
            # sanitizer or diff.  Let post.mortem figure out if the
            # test finished.

            old_result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     quick=True)
            if skip.result(logger, args, old_result):
                logger.info("%s %s skipped (previously %s) %s", prefix,
                            test_prefix, old_result, suffix)
                test_stats.add(test, "skipped")
                result_stats.add_skipped(old_result)
                return

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....", prefix,
                            test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory; if it already exists,
            # move any contents to BACKUP/.  Do it file-by-file so
            # that, at no point, the OUTPUT/ directory is missing
            # (having an OUTPUT/ directory implies the test was
            # started).
            #
            # Don't create the path.  If the parent directory is
            # missing, this will fail.
            #
            # By backing up each test just before it is started, a
            # trail of what tests were attempted during each run is
            # created.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/, this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileNotFoundError:
                # Bail: something is messed up, such as the parent
                # directory not existing.
                return
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s",
                                   test_prefix,
                                   logfile=os.path.join(
                                       test.output_directory, "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(
                            logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        logger.exception("timeout while booting domains")
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        return

                # Run the scripts directly
                with logger.time(
                        "running scripts %s",
                        " ".join(("%s:%s" % (host, script)) for host, script in
                                 test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(
                            logger,
                            domain_prefix,
                            test.output_directory, [
                                test_domain.domain
                                for test_domain in test_domains.values()
                            ],
                            enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(
                                    test.output_directory,
                                    test_domain.domain.host_name +
                                    ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s",
                                        script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write(
                                        "\n*** exception running script %s ***\n%s"
                                        % (script, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(
                                    post.DONE)

                        except pexpect.TIMEOUT as e:
                            # A test ending with a timeout gets
                            # treated as unresolved.  Timeouts
                            # shouldn't occur so human intervention
                            # is required.
                            logger.error(
                                "**** timed out while running test script %s ****",
                                script)

                        finally:

                            # Close the redirected test-result log files
                            logger.info(
                                "closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test,
                                     args,
                                     domain_prefix=domain_prefix,
                                     update=True)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed out or crashed isn't considered
            # resolved, so the file isn't created.
            #
            # XXX: this should go away.

            if not os.path.isfile(test.result_file()) \
            and result.resolution.isresolved():
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                    jsonutil.result.total_time: round(test_runtime.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", test.result_file(),
                             j)
                with open(test.result_file(), "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info,
                                     header="updated test results:",
                                     prefix="  ")
Example 3
def main():

    parser = argparse.ArgumentParser(
        description="write 'table.json' to standard output")

    parser.add_argument("--verbose", "-v", action="store_true")

    parser.add_argument("directories",
                        metavar="OUTPUT-DIRECTORY",
                        nargs="+",
                        help="output directories containing RESULT files")

    args = parser.parse_args()

    # Force the order
    host_names = ["east", "west", "road", "north", "nic"]
    columns = ["Test", "Expected", "Result", "Run time"]
    for host in host_names:
        columns.append(host)
    rows = []

    first_time = last_time = None
    total = passed = failed = incomplete = good = 0

    for directory in args.directories:

        if args.verbose:
            sys.stderr.write("%s\n" % directory)

        d = directory

        if not path.isdir(d):
            sys.stderr.write("%s (%s) is not a directory\n" % (directory, d))
            return 1

        # work around python's basename - remove any trailing "/"
        if not path.basename(d):
            d = path.dirname(d)

        if path.basename(d) != "OUTPUT":
            # try <d>/OUTPUT
            t = path.join(d, "OUTPUT")
            if not path.isdir(t):
                sys.stderr.write("%s (%s) is not an OUTPUT directory\n" %
                                 (directory, d))
                return 1
            d = t

        result_file = path.join(d, "RESULT")
        debug_log = path.join(d, "debug.log")

        if not path.isfile(result_file) and not path.isfile(debug_log):
            sys.stderr.write("%s (%s) contains no results\n" % (directory, d))
            continue

        total += 1

        runtime = ""

        RESULT = {}

        # If the RESULT file exists, use that.
        if path.isfile(result_file):
            # The RESULT file contains lines of JSON; the entry with
            # the "result" field (which also carries the runtime) is
            # the one wanted.
            with open(result_file) as f:
                for line in f:
                    try:
                        j = jsonutil.loads(line)
                    except Exception:
                        sys.stderr.write("%s: invalid json: <<%s>>\n" %
                                         (result_file, line))
                        break
                    if not j:
                        break
                    if "result" in j:
                        RESULT = j
                        break

        # The debug.log should contain start/end lines, even when
        # the test didn't finish properly.
        debug_start_time = ""
        debug_end_time = ""
        debug_runtime = ""
        if path.isfile(debug_log):
            with open(debug_log) as f:
                debug = f.read()
                debug_start_time = debug_time(r"starting debug log at (.*)$",
                                              debug)
                debug_end_time = debug_time(r"ending debug log at (.*)$",
                                            debug)
        if debug_start_time and debug_end_time:
            debug_runtime = round(
                (debug_end_time - debug_start_time).total_seconds(), 2)

        # fill in anything that is missing

        # Relative path to this directory so that html can construct
        # link.
        RESULT[jsonutil.result.directory] = d

        # Testname from .../<testname>/OUTPUT.
        if jsonutil.result.testname not in RESULT:
            # dirname() strips the trailing OUTPUT component, leaving
            # .../<testname>.
            RESULT[jsonutil.result.testname] = path.dirname(d)

        if jsonutil.result.result not in RESULT:
            RESULT[jsonutil.result.result] = "incomplete"
        if RESULT[jsonutil.result.result] == "passed":
            passed += 1
        elif RESULT[jsonutil.result.result] == "failed":
            failed += 1
        else:
            incomplete += 1

        if jsonutil.result.expect not in RESULT:
            RESULT[jsonutil.result.expect] = "good"
        if RESULT[jsonutil.result.expect] == "good":
            good += 1

        # this is the end-time
        if jsonutil.result.time not in RESULT and debug_end_time:
            RESULT[jsonutil.result.time] = jsonutil.ftime(debug_end_time)
        # having separate boottime and testtime would be nice
        if jsonutil.result.runtime not in RESULT and debug_runtime:
            RESULT[jsonutil.result.runtime] = debug_runtime

        # Update the total times

        end_time = ""
        if debug_end_time:
            end_time = debug_end_time
        elif jsonutil.result.time in RESULT:
            end_time = jsonutil.ptime(RESULT[jsonutil.result.time])

        start_time = ""
        if debug_start_time:
            start_time = debug_start_time
        elif end_time and runtime:
            start_time = end_time - timedelta(seconds=runtime)

        if start_time:
            if not first_time:
                first_time = start_time
            elif start_time < first_time:
                first_time = start_time

        if end_time:
            if not last_time:
                last_time = end_time
            elif end_time > last_time:
                last_time = end_time

        rows.append(RESULT)

    runtime = "00:00:00"
    if first_time and last_time:
        runtime = (last_time - first_time)
        runtime = str(timedelta(days=runtime.days, seconds=runtime.seconds))

    date = jsonutil.ftime(datetime.fromordinal(1))
    if first_time:
        date = jsonutil.ftime(first_time)

    summary = {
        jsonutil.summary.total: total,
        jsonutil.summary.passed: passed,
        jsonutil.summary.failed: failed,
        jsonutil.summary.incomplete: incomplete,
        jsonutil.summary.date: date,
        jsonutil.summary.runtime: runtime,
        jsonutil.summary.good: good,
    }

    table = {
        jsonutil.results.summary: summary,
        jsonutil.results.table: rows,
    }
    jsonutil.dump(table, sys.stdout)
    sys.stdout.write("\n")

    return 0
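
debug_time() above is defined elsewhere in the original script.  A plausible sketch, assuming debug.log carries lines such as "starting debug log at <timestamp>"; the timestamp format here is an assumption and the real parser may differ:

import re
from datetime import datetime

def debug_time(regex, log_text):
    """Return the timestamp captured by REGEX from LOG_TEXT, or ""."""
    match = re.search(regex, log_text, re.MULTILINE)
    if not match:
        return ""
    # Assumed format; adjust to whatever debug.log actually records.
    return datetime.strptime(match.group(1).strip(),
                             "%Y-%m-%d %H:%M:%S.%f")
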
Example 4
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored and not include_ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            # No need to log all the ignored tests when an explicit
            # sub-set of tests is being run.  For instance, when running
            # just one test.
            if not args.test_name:
                logger.info("$s %s ignored (%s) %s", prefix, test_prefix,
                            details, suffix)
            return

        # Be lazy with gathering the results: don't run the sanitizer
        # or diff.
        #
        # XXX: There is a bug here where the only difference is white
        # space.  The test will show up as failed when it previously
        # showed up as a white-space pass.
        #
        # The presence of the RESULT file is a proxy for detecting that
        # the test was incomplete.

        old_result = post.mortem(test,
                                 args,
                                 test_finished=None,
                                 skip_diff=True,
                                 skip_sanitize=True)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....", prefix,
                        test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create the OUTPUT/ directory; if it already exists, move any
        # contents to BACKUP/.  Do it file-by-file so that, at no
        # point, the OUTPUT/ directory is missing (presence of OUTPUT/
        # implies the test was started).
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/, this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s",
                               test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test,
                                                      domain_prefix,
                                                      boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains",
                                     test.name)
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.
                    return

            # Run the scripts directly
            with logger.time(
                    "running scripts %s",
                    " ".join(("%s:%s" % (host, script)) for host, script in
                             test.host_script_tuples)) as test_script_time:
                try:

                    # re-direct the test-result log file
                    for test_domain in test_domains.values():
                        output = os.path.join(
                            test.output_directory,
                            test_domain.domain.host_name +
                            ".console.verbose.txt")
                        test_domain.console.output(open(output, "w"))

                    for host, script in test.host_script_tuples:
                        if args.stop_at == script:
                            logger.error(
                                "stopping test run at (before executing) script %s",
                                script)
                            break
                        test_domain = test_domains[host]
                        test_domain.read_file_run(script)
                    result = post.mortem(test,
                                         args,
                                         test_finished=True,
                                         update=True)

                except pexpect.TIMEOUT as e:
                    logger.exception(
                        "**** timeout out while running script %s ****",
                        script)
                    # Still perform the post-mortem so that errors are
                    # captured, but force the result to incomplete.
                    result = post.mortem(test,
                                         args,
                                         test_finished=False,
                                         update=True)

                finally:

                    # Close the redirected test-result log files and
                    # always disconnect from the test domains.
                    logger.info("closing all test domains")
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        if logfile:
                            logfile.close()
                        test_domain.close()

    # The above will have set result.  Exceptions such as control-c or
    # a crash bypass this code.

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.errors
                and " ", result.errors, suffix)

    # Since the test finished, emit enough JSON to fool scripts like
    # pluto-testlist-scan.sh.
    #
    # This also leaves a simple marker to indicate that the test
    # finished.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.
    hosts = {}
    for host in sorted(test.host_names):
        if host in result.errors:
            hosts[host] = list(result.errors[host])
        else:
            hosts[host] = ["passed"]
    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.expected_result,
        jsonutil.result.result: str(result),
        jsonutil.result.time: jsonutil.ftime(datetime.now()),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        jsonutil.result.hosts: hosts,
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info,
                           header="updated test stats:",
                           prefix="  ")
    result_stats.log_summary(logger.info,
                             header="updated test results:",
                             prefix="  ")
Example 5
def main():

    parser = argparse.ArgumentParser(description="write 'table.json' to standard output")

    parser.add_argument("--verbose", "-v", action="store_true")

    parser.add_argument("directories", metavar="OUTPUT-DIRECTORY", nargs="+",
                        help="output directories containing RESULT files")

    args = parser.parse_args()

    # Force the order
    host_names = [ "east", "west", "road", "north", "nic" ]
    columns = [ "Test", "Expected", "Result", "Run time" ]
    for host in host_names:
        columns.append(host)
    rows = []

    first_time = last_time = None
    total = passed = failed = incomplete = good = 0

    for directory in args.directories:

        if args.verbose:
            sys.stderr.write("%s\n" % directory)

        d = directory

        if not path.isdir(d):
            sys.stderr.write("%s (%s) is not a directory\n" % (directory, d))
            return 1

        # work around python's basename - remove any trailing "/"
        if not path.basename(d):
            d = path.dirname(d)

        if path.basename(d) != "OUTPUT":
            # try <d>/OUTPUT
            t = path.join(d, "OUTPUT")
            if not path.isdir(t):
                sys.stderr.write("%s (%s) is not an OUTPUT directory\n" % (directory, d))
                return 1
            d = t

        result_file = path.join(d, "RESULT")
        debug_log = path.join(d, "debug.log")

        if not path.isfile(result_file) and not path.isfile(debug_log):
            sys.stderr.write("%s (%s) contains no results\n" % (directory, d))
            continue

        total += 1

        runtime = ""

        RESULT = {}

        # If the RESULT file exists, use that.
        if path.isfile(result_file):
            # The RESULT file contains lines of JSON; the entry with
            # the "result" field (which also carries the runtime) is
            # the one wanted.
            with open(result_file) as f:
                for line in f:
                    try:
                        j = jsonutil.loads(line)
                    except Exception:
                        sys.stderr.write("%s: invalid json: <<%s>>\n" % (result_file, line))
                        break
                    if not j:
                        break
                    if "result" in j:
                        RESULT = j
                        break

        # The debug.log should contain start/end lines, even when
        # the test didn't finish properly.
        debug_start_time = ""
        debug_end_time = ""
        debug_runtime = ""
        if path.isfile(debug_log):
            with open(debug_log) as f:
                debug = f.read()
                debug_start_time = debug_time(r"starting debug log at (.*)$", debug)
                debug_end_time = debug_time(r"ending debug log at (.*)$", debug)
        if debug_start_time and debug_end_time:
            debug_runtime = round((debug_end_time - debug_start_time).total_seconds(), 2)

        # fill in anything that is missing

        # Relative path to this directory so that html can construct
        # link.
        RESULT[jsonutil.result.directory] = d

        # Testname from .../<testname>/OUTPUT.
        if jsonutil.result.testname not in RESULT:
            # dirname() strips the trailing OUTPUT component, leaving
            # .../<testname>.
            RESULT[jsonutil.result.testname] = path.dirname(d)

        if jsonutil.result.result not in RESULT:
            RESULT[jsonutil.result.result] = "incomplete"
        if RESULT[jsonutil.result.result] == "passed":
            passed += 1
        elif RESULT[jsonutil.result.result] == "failed":
            failed += 1
        else:
            incomplete += 1

        if jsonutil.result.expect not in RESULT:
            RESULT[jsonutil.result.expect] = "good"
        if RESULT[jsonutil.result.expect] == "good":
            good += 1

        # this is the end-time
        if jsonutil.result.time not in RESULT and debug_end_time:
            RESULT[jsonutil.result.time] = jsonutil.ftime(debug_end_time)
        # having separate boottime and testtime would be nice
        if jsonutil.result.runtime not in RESULT and debug_runtime:
            RESULT[jsonutil.result.runtime] = debug_runtime

        # Update the total times

        end_time = ""
        if debug_end_time:
            end_time = debug_end_time
        elif jsonutil.result.time in RESULT:
            end_time = jsonutil.ptime(RESULT[jsonutil.result.time])

        start_time = ""
        if debug_start_time:
            start_time = debug_start_time
        elif end_time and runtime:
            start_time = end_time - timedelta(seconds=runtime)

        if start_time:
            if not first_time:
                first_time = start_time
            elif start_time < first_time:
                first_time = start_time

        if end_time:
            if not last_time:
                last_time = end_time
            elif end_time > last_time:
                last_time = end_time

        rows.append(RESULT)

    runtime = "00:00:00"
    if first_time and last_time:
        runtime = (last_time - first_time)
        runtime = str(timedelta(days=runtime.days, seconds=runtime.seconds))

    date = jsonutil.ftime(datetime.fromordinal(1))
    if first_time:
        date = jsonutil.ftime(first_time)

    summary = {
        jsonutil.summary.total: total,
        jsonutil.summary.passed: passed,
        jsonutil.summary.failed: failed,
        jsonutil.summary.incomplete: incomplete,
        jsonutil.summary.date: date,
        jsonutil.summary.runtime: runtime,
        jsonutil.summary.good: good,
    }

    table = {
        jsonutil.results.summary: summary,
        jsonutil.results.table: rows,
    }
    jsonutil.dump(table, sys.stdout)
    sys.stdout.write("\n")

    return 0
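
The first_time/last_time bookkeeping in main() is a running minimum/maximum over optional timestamps.  An equivalent compact sketch (the helper name is illustrative):

def fold_times(first_time, last_time, start_time, end_time):
    """Fold one test's start/end into the running first/last pair."""
    if start_time and (not first_time or start_time < first_time):
        first_time = start_time
    if end_time and (not last_time or end_time > last_time):
        last_time = end_time
    return first_time, last_time
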
Example 6
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_start_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, include_ignored, details = ignore.test(logger, args, test)
        if ignored and not include_ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            # No need to log all the ignored tests when an explicit
            # sub-set of tests is being run.  For instance, when running
            # just one test.
            if not args.test_name:
                logger.info("$s %s ignored (%s) %s",
                            prefix, test_prefix, details, suffix)
            return

        # Be lazy with gathering the results: don't run the sanitizer
        # or diff.
        #
        # XXX: There is a bug here where the only difference is white
        # space.  The test will show up as failed when it previously
        # showed up as a white-space pass.
        #
        # The presence of the RESULT file is a proxy for detecting that
        # the test was incomplete.

        old_result = post.mortem(test, args, test_finished=None,
                                 skip_diff=True, skip_sanitize=True)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create the OUTPUT/ directory; if it already exists, move any
        # contents to BACKUP/.  Do it file-by-file so that, at no
        # point, the OUTPUT/ directory is missing (presence of OUTPUT/
        # implies the test was started).
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/, this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains", test.name)
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.
                    return


            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                try:

                    # re-direct the test-result log file
                    for test_domain in test_domains.values():
                        output = os.path.join(test.output_directory,
                                              test_domain.domain.host_name + ".console.verbose.txt")
                        test_domain.console.output(open(output, "w"))

                    for host, script in test.host_script_tuples:
                        if args.stop_at == script:
                            logger.error("stopping test run at (before executing) script %s", script)
                            break
                        test_domain = test_domains[host]
                        test_domain.read_file_run(script)
                    result = post.mortem(test, args, test_finished=True, update=True)

                except pexpect.TIMEOUT:
                    logger.exception("**** timed out while running script %s ****", script)
                    # Still perform the post-mortem so that errors are
                    # captured, but force the result to incomplete.
                    result = post.mortem(test, args, test_finished=False, update=True)

                finally:

                    # Close the redirected test-result log files and
                    # always disconnect from the test domains; guard
                    # against a console that was never redirected.
                    logger.info("closing all test domains")
                    for test_domain in test_domains.values():
                        logfile = test_domain.console.output()
                        if logfile:
                            logfile.close()
                        test_domain.close()

    # The above will have set result.  Exceptions such as a control-c
    # or a crash bypass this code.

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                result.errors and " ", result.errors, suffix)

    # Since the test finished, emit enough JSON to fool scripts like
    # pluto-testlist-scan.sh.
    #
    # This also leaves a simple marker to indicate that the test
    # finished.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.
    hosts = {}
    for host in sorted(test.host_names):
        if host in result.errors:
            hosts[host] = list(result.errors[host])
        else:
            hosts[host] = ["passed"]
    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.expected_result,
        jsonutil.result.result: str(result),
        jsonutil.result.time: jsonutil.ftime(datetime.now()),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
        jsonutil.result.hosts: hosts,
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
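
The logger.time()/logger.debug_time() context managers, and the .seconds() and .start accessors used on their results, come from the project's logutil module, whose source isn't included in these examples. Below is a minimal sketch of the contract the call sites rely on; the names Timer and timed are invented for illustration and are not the project's actual API:

import time
from contextlib import contextmanager
from datetime import datetime

class Timer:
    """Wall-clock timer; .start and .seconds() mirror how the examples
    read test_total_time, test_boot_time, and friends."""
    def __init__(self):
        self.start = datetime.now()      # read by jsonutil.ftime(...) above
        self._t0 = time.monotonic()
        self._elapsed = None
    def seconds(self):
        # Works both inside the with-block (still running) and after
        # it (frozen by timed() below).
        if self._elapsed is None:
            return time.monotonic() - self._t0
        return self._elapsed

@contextmanager
def timed(logger, fmt, *args):
    # Hypothetical stand-in for logger.time(): log on entry and exit,
    # and yield a Timer whose seconds() survives the with-block.
    timer = Timer()
    logger.info("start: " + fmt, *args)
    try:
        yield timer
    finally:
        timer._elapsed = time.monotonic() - timer._t0
        logger.info("stop: " + fmt + " after %0.1f seconds",
                    *(args + (timer._elapsed,)))

Under this sketch, "with timed(logger, 'booting domains') as test_boot_time:" followed later by test_boot_time.seconds() behaves like the calls in the examples.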
Example No. 7
def _process_test(domain_prefix, test, args, test_stats, result_stats,
                  test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s", prefix, test_prefix, details,
                        suffix)
            return

        # Be lazy when gathering the results; don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.

        old_result = post.mortem(test,
                                 args,
                                 domain_prefix=domain_prefix,
                                 quick=True,
                                 finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s", prefix,
                        test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....", prefix,
                        test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/, this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s",
                               test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test,
                                                      domain_prefix,
                                                      boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time(
                    "running scripts %s",
                    " ".join(("%s:%s" % (host, script)) for host, script in
                             test.host_script_tuples)) as test_script_time:
                with tcpdump.Dump(logger,
                                  domain_prefix,
                                  test.output_directory, [
                                      test_domain.domain
                                      for test_domain in test_domains.values()
                                  ],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(
                                test.output_directory,
                                test_domain.domain.host_name +
                                ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error(
                                        "stopping test run at (before executing) script %s",
                                        script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error(
                                "**** timed out while running script %s ****",
                                script)

                    finally:

                        # Close the redirected test-result log files
                        # and always disconnect from the test domains;
                        # guard against a console that was never
                        # redirected.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

    # The test finished.  Aborts such as a failed boot, or a timeout,
    # will skip all the below.

    result = post.mortem(test,
                         args,
                         domain_prefix=domain_prefix,
                         finished=True,
                         update=True)

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result, result.issues
                and " ", result.issues, suffix)

    # Since the test finished (resolved in POSIX terminology), emit
    # enough JSON to fool scripts like pluto-testlist-scan.sh.
    #
    # A test that timed out or crashed isn't considered resolved.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.

    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.status,
        jsonutil.result.result: result,
        jsonutil.result.issues: result.issues,
        jsonutil.result.hosts: test.host_names,
        jsonutil.result.time: jsonutil.ftime(test_total_time.start),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info,
                           header="updated test stats:",
                           prefix="  ")
    result_stats.log_summary(logger.info,
                             header="updated test results:",
                             prefix="  ")
Example No. 8
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_total_time = None

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    with logger.time("processing test %s", test_prefix) as test_total_time:

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Be lazy when gathering the results; don't run the sanitizer
        # or diff.  Let post.mortem figure out if the test finished.

        old_result = post.mortem(test, args, domain_prefix=domain_prefix,
                                 quick=True, finished=None)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            return

        if old_result:
            test_stats.add(test, "tests", "retry")
            logger.info("%s %s started (previously %s) ....",
                        prefix, test_prefix, old_result)
        else:
            test_stats.add(test, "tests", "try")
            logger.info("%s %s started ....", prefix, test_prefix)
        test_stats.add(test, "tests")

        # Create just the OUTPUT/ directory; if it already exists,
        # move any contents to BACKUP/.  Do it file-by-file so that,
        # at no point, the OUTPUT/ directory is missing (having an
        # OUTPUT/ directory implies the test was started).
        #
        # Don't create the path.  If the parent directory is missing,
        # this will fail.
        #
        # By backing up each test just before it is started, a trail
        # of what tests were attempted during each run is created.
        #
        # XXX:
        #
        # During boot, swan-transmogrify runs "chcon -R
        # testing/pluto".  Of course this means that each time a test
        # is added and/or a test is run (adding files under
        # <test>/OUTPUT), the boot process (and consequently the time
        # taken to run a test) keeps increasing.
        #
        # By moving the directory contents to BACKUP/, which is not
        # under testing/pluto/, this problem is avoided.

        try:
            os.mkdir(test.output_directory)
        except FileNotFoundError:
            # Bail, something is messed up (for instance the parent directory doesn't exist).
            return
        except FileExistsError:
            backup_directory = os.path.join(args.backup_directory, test.name)
            logger.info("moving contents of '%s' to '%s'",
                        test.output_directory, backup_directory)
            # Even if OUTPUT/ is empty, move it.
            os.makedirs(backup_directory, exist_ok=True)
            for name in os.listdir(test.output_directory):
                src = os.path.join(test.output_directory, name)
                dst = os.path.join(backup_directory, name)
                logger.debug("moving '%s' to '%s'", src, dst)
                os.replace(src, dst)

        # Now that the OUTPUT directory is empty, start a debug log
        # writing to that directory; include timing for this test run.

        with logger.debug_time("testing %s", test_prefix,
                               logfile=os.path.join(test.output_directory,
                                                    "debug.log"),
                               loglevel=logutil.INFO) as test_runtime:

            with logger.time("booting domains") as test_boot_time:
                try:
                    test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                except pexpect.TIMEOUT:
                    logger.exception("timeout while booting domains")
                    # Bail before RESULT is written - being unable to
                    # boot the domains is a disaster.  The test is
                    # UNRESOLVED.
                    return

            # Run the scripts directly
            with logger.time("running scripts %s",
                             " ".join(("%s:%s" % (host, script))
                                      for host, script in test.host_script_tuples)) as test_script_time:
                with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                  [test_domain.domain for test_domain in test_domains.values()],
                                  enable=args.tcpdump):

                    try:

                        # re-direct the test-result log file
                        for test_domain in test_domains.values():
                            output = os.path.join(test.output_directory,
                                                  test_domain.domain.host_name + ".console.verbose.txt")
                            test_domain.console.output(open(output, "w"))

                        try:
                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                test_domain.read_file_run(script)
                        except pexpect.TIMEOUT:
                            # A test ending with a timeout is still a
                            # finished test.  Analysis of the results
                            # will detect this and flag it as a fail.
                            logger.error("**** timed out while running script %s ****", script)

                    finally:

                        # Close the redirected test-result log files
                        # and always disconnect from the test domains;
                        # guard against a console that was never
                        # redirected.
                        logger.info("closing all test domains")
                        for test_domain in test_domains.values():
                            logfile = test_domain.console.output()
                            if logfile:
                                logfile.close()
                            test_domain.close()

    # The test finished.  Aborts such as a failed boot, or a timeout,
    # will skip all the below.

    result = post.mortem(test, args, domain_prefix=domain_prefix,
                         finished=True, update=True)

    logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                result.issues and " ", result.issues, suffix)

    # Since the test finished (resolved in POSIX terminology), emit
    # enough JSON to fool scripts like pluto-testlist-scan.sh.
    #
    # A test that timed out or crashed isn't considered resolved.
    #
    # A more robust way of doing this would be to mark each of the
    # console logs as complete as it is closed.
    #
    # More detailed information can be extracted from the debug.log.

    RESULT = {
        jsonutil.result.testname: test.name,
        jsonutil.result.expect: test.status,
        jsonutil.result.result: result,
        jsonutil.result.issues: result.issues,
        jsonutil.result.hosts: test.host_names,
        jsonutil.result.time: jsonutil.ftime(test_total_time.start),
        jsonutil.result.runtime: round(test_runtime.seconds(), 1),
        jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
        jsonutil.result.script_time: round(test_script_time.seconds(), 1),
        jsonutil.result.total_time: round(test_total_time.seconds(), 1),
    }
    j = jsonutil.dumps(RESULT)
    logger.info("filling '%s' with json: %s", test.result_file(), j)
    with open(test.result_file(), "w") as f:
        f.write(j)
        f.write("\n")

    test_stats.add(test, "tests", str(result))
    result_stats.add_result(result, old_result)
    test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
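
Examples 7 to 9 wrap the script phase in tcpdump.Dump(..., enable=args.tcpdump), a project-specific context manager whose implementation these snippets don't include. Below is a rough stand-in built on subprocess and the stock tcpdump binary; note the real class is handed domain objects, while this sketch assumes plain interface names, and everything beyond the -i/-w flags is guesswork:

import os
import subprocess
from contextlib import contextmanager

@contextmanager
def packet_dump(logger, output_directory, interfaces, enable=True):
    # Mirror enable=args.tcpdump: a no-op unless capture was requested.
    if not enable:
        yield
        return
    procs = []
    try:
        for interface in interfaces:
            pcap = os.path.join(output_directory, interface + ".pcap")
            logger.info("capturing '%s' to '%s'", interface, pcap)
            # -i: interface to capture on; -w: write raw packets to file.
            procs.append(subprocess.Popen(["tcpdump", "-i", interface, "-w", pcap]))
        yield
    finally:
        for proc in procs:
            proc.terminate()    # lets tcpdump flush and close the capture
            proc.wait()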
Example No. 9
def _process_test(domain_prefix, test, args, test_stats, result_stats, test_count, tests_count, boot_executor):

    logger = logutil.getLogger(domain_prefix, __name__, test.name)

    prefix = "******"
    suffix = "******"
    test_stats.add(test, "total")

    test_runtime = test_boot_time = test_script_time = test_post_time = None
    old_result = None
    backup_directory = os.path.join(args.backup_directory, test.name)

    # Would the number of tests to be [re]run be better?
    test_prefix = "%s (test %d of %d)" % (test.name, test_count, tests_count)
    publish.json_status(logger, args, "processing %s" % test_prefix)
    with logger.time("processing test %s", test_prefix):

        # Ignoring the test completely?
        #
        # So that there's no possible confusion over the test being
        # run; remove any pre-existing output.

        ignored, details = ignore.test(logger, args, test)
        if ignored:
            # The isdir() test followed by a simple move, while
            # racy, should be good enough.
            if os.path.isdir(test.output_directory):
                logger.info("moving '%s' to '%s'", test.output_directory,
                            backup_directory)
                os.makedirs(os.path.dirname(backup_directory), exist_ok=True)
                os.rename(test.output_directory, backup_directory)
            result_stats.add_ignored(test, ignored)
            test_stats.add(test, "ignored")
            logger.info("%s %s ignored (%s) %s",
                        prefix, test_prefix, details, suffix)
            return

        # Skip the test, leaving old results?
        #
        # For instance, during a test re-run, skip any tests that are
        # passing.
        #
        # The check below compares the test and expected output,
        # ignoring any previous test result.  This way the results are
        # consistent with kvmresults.py which always reflects the
        # current sources.
        #
        # - modifying the expected output so that it no longer matches
        #   the last result is a fail
        #
        # - modifying the expected output so that it matches the last
        #   result is a pass

        old_result = post.mortem(test, args, domain_prefix=domain_prefix, quick=False)
        if skip.result(logger, args, old_result):
            logger.info("%s %s skipped (previously %s) %s",
                        prefix, test_prefix, old_result, suffix)
            test_stats.add(test, "skipped")
            result_stats.add_skipped(old_result)
            publish.everything(logger, args, old_result)
            return

        # Running the test ...
        #
        # From now on the test will be run so need to perform post
        # mortem.

        try:

            if old_result:
                test_stats.add(test, "tests", "retry")
                logger.info("%s %s started (previously %s) ....",
                            prefix, test_prefix, old_result)
            else:
                test_stats.add(test, "tests", "try")
                logger.info("%s %s started ....", prefix, test_prefix)
            test_stats.add(test, "tests")

            # Create just the OUTPUT/ directory.
            #
            # If the directory already exists, move the contents to
            # BACKUP/.  Do it file-by-file so that, at no point, the
            # OUTPUT/ directory is missing (having an OUTPUT/
            # directory implies the test was started).
            #
            # Don't try to create the path.  If the parent directory
            # is missing, this and the entire script will crash.
            # Someone did something nasty like deleted the parent
            # directory.
            #
            # Backing up each test just before it is started leaves a
            # trail of what tests were attempted during a test run.
            #
            # XXX:
            #
            # During boot, swan-transmogrify runs "chcon -R
            # testing/pluto".  Of course this means that each time a
            # test is added and/or a test is run (adding files under
            # <test>/OUTPUT), the boot process (and consequently the
            # time taken to run a test) keeps increasing.
            #
            # By moving the directory contents to BACKUP/, which is
            # not under testing/pluto/, this problem is avoided.

            try:
                os.mkdir(test.output_directory)
            except FileExistsError:
                logger.info("moving contents of '%s' to '%s'",
                            test.output_directory, backup_directory)
                # Even if OUTPUT/ is empty, move it.
                os.makedirs(backup_directory, exist_ok=True)
                for name in os.listdir(test.output_directory):
                    src = os.path.join(test.output_directory, name)
                    dst = os.path.join(backup_directory, name)
                    logger.debug("moving '%s' to '%s'", src, dst)
                    os.replace(src, dst)

            # Now that the OUTPUT directory is empty, start a debug
            # log writing to that directory; include timing for this
            # test run.

            with logger.debug_time("testing %s", test_prefix,
                                   logfile=os.path.join(test.output_directory,
                                                        "debug.log"),
                                   loglevel=logutil.INFO) as test_runtime:

                # boot the domains
                with logger.time("booting domains") as test_boot_time:
                    try:
                        test_domains = _boot_test_domains(logger, test, domain_prefix, boot_executor)
                    except pexpect.TIMEOUT:
                        # Bail.  Being unable to boot the domains is a
                        # disaster.  The test is UNRESOLVED.
                        logger.exception("timeout while booting domains")
                        return
                    except pexpect.EOF:
                        # Bail.  Being unable to attach to the domains
                        # is a disaster.  The test is UNRESOLVED.
                        logger.exception("eof (disconnect) while booting domains")
                        return

                # Run the scripts directly
                with logger.time("running scripts %s",
                                 " ".join(("%s:%s" % (host, script))
                                          for host, script in test.host_script_tuples)) as test_script_time:
                    with tcpdump.Dump(logger, domain_prefix, test.output_directory,
                                      [test_domain.domain for test_domain in test_domains.values()],
                                      enable=args.tcpdump):

                        try:

                            # re-direct the test-result log file
                            for test_domain in test_domains.values():
                                output = os.path.join(test.output_directory,
                                                      test_domain.domain.host_name + ".console.verbose.txt")
                                test_domain.console.output(open(output, "w"))

                            for host, script in test.host_script_tuples:
                                if args.stop_at == script:
                                    logger.error("stopping test run at (before executing) script %s", script)
                                    break
                                test_domain = test_domains[host]
                                try:
                                    test_domain.read_file_run(script)
                                except pexpect.TIMEOUT:
                                    # A test ending with a timeout
                                    # gets treated as a FAIL.  A
                                    # timeout while running a test
                                    # script is a sign that a command
                                    # hung.
                                    post_timeout = "%s %s:%s" % (post.TIMEOUT, host, script)
                                    logger.warning("*** %s ***", post_timeout)
                                    test_domain.console.child.logfile.write("%s %s %s" % (post.LHS, post_timeout, post.RHS))
                                    break
                                except BaseException as e:
                                    # if there is an exception, write
                                    # it to the console
                                    test_domain.console.child.logfile.write("\n%s %s %s:%s %s\n%s" % (post.LHS, post.EXCEPTION, host, script, post.RHS, str(e)))
                                    raise

                            for test_domain in test_domains.values():
                                test_domain.console.child.logfile.write(post.DONE)

                        finally:

                            # Close the redirected test-result log files
                            logger.info("closing all the test domain log files")
                            for test_domain in test_domains.values():
                                outfile = test_domain.console.output()
                                outfile.close()

                            # Always disconnect from the test domains.
                            logger.info("closing all the test domains")
                            for test_domain in test_domains.values():
                                test_domain.close()

        finally:

            with logger.time("post-mortem %s", test_prefix):
                # The test finished; it is assumed that post.mortem
                # can deal with a crashed test.
                result = post.mortem(test, args, domain_prefix=domain_prefix)
                logger.info("%s %s %s%s%s %s", prefix, test_prefix, result,
                            result.issues and " ", result.issues, suffix)

            result.save()

            # If the test was run (a fresh run would delete RESULT)
            # and finished (resolved in POSIX terminology), emit
            # enough JSON to fool scripts like pluto-testlist-scan.sh.
            #
            # A test that timed-out or crashed, isn't considered
            # resolved so the file isn't created.
            #
            # XXX: this should go away.

            result_file = os.path.join(test.output_directory, "RESULT")
            if (not os.path.isfile(result_file)
                    and result.resolution.isresolved()):
                RESULT = {
                    jsonutil.result.testname: test.name,
                    jsonutil.result.expect: test.status,
                    jsonutil.result.result: result,
                    jsonutil.result.issues: result.issues,
                    jsonutil.result.hosts: test.host_names,
                    jsonutil.result.time: jsonutil.ftime(test_runtime.start),
                    jsonutil.result.runtime: round(test_runtime.seconds(), 1),
                    jsonutil.result.boot_time: round(test_boot_time.seconds(), 1),
                    jsonutil.result.script_time: round(test_script_time.seconds(), 1),
                }
                j = jsonutil.dumps(RESULT)
                logger.debug("filling '%s' with json: %s", result_file, j)
                with open(result_file, "w") as f:
                    f.write(j)
                    f.write("\n")

            # Do this after RESULT is created so it too is published.
            publish.everything(logger, args, result)
            publish.json_status(logger, args, "finished %s" % test_prefix)

            test_stats.add(test, "tests", str(result))
            result_stats.add_result(result, old_result)
            # test_stats.log_summary(logger.info, header="updated test stats:", prefix="  ")
            result_stats.log_summary(logger.info, header="updated test results:", prefix="  ")
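
The jsonutil module that serialises RESULT is also project-specific; only the field names used at the call sites are visible in these snippets. Below is a stdlib-only approximation of the record the last example writes, where the key strings and the timestamp format are guesses at what jsonutil produces:

import json

def write_result_file(path, test_name, expect, result, issues, hosts,
                      start, runtime, boot_time, script_time):
    # Mirrors the RESULT dict built above; str() stands in for whatever
    # serialisation jsonutil applies to the post.mortem result object.
    record = {
        "testname": test_name,
        "expect": expect,
        "result": str(result),
        "issues": issues,
        "hosts": list(hosts),
        "time": start.isoformat(timespec="seconds"),   # guess at jsonutil.ftime
        "runtime": round(runtime, 1),
        "boot_time": round(boot_time, 1),
        "script_time": round(script_time, 1),
    }
    with open(path, "w") as f:
        json.dump(record, f)
        f.write("\n")

As the comments above note, the file doubles as a "test resolved" marker, which is why the last example only writes it when result.resolution.isresolved().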