Example #1
def json_result(logger, args, result):
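    # NOTE: printer, jsonutil, os, results_to_print, JSON_RESULTS,
    # JSON_SUMMARY and the _-prefixed helpers are module-level names
    # from the file this excerpt was taken from.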
    if not args.publish_results:
        return

    # Convert the result into json, and ...
    json_builder = printer.JsonBuilder()
    printer.build_result(logger, result, None, args, results_to_print, json_builder)
    json_result = json_builder.json()
    json_result["directory"] = result.test.name

    # ... if there is an output directory, write that also
    outdir = _mkdir_test_output(logger, args, result)
    if outdir:
        # needs to be a relative path
        json_result["output_directory"] = os.path.join(os.path.basename(os.path.dirname(outdir)),
                                                       os.path.basename(outdir))
        path = os.path.join(outdir, "result.json")
        logger.info("writing result to '%s'", path)
        with open(path, "w") as output:
            jsonutil.dump(json_result, output)
            output.write("\n")

    # accumulate results.json
    JSON_RESULTS.append(json_result)

    # accumulate summary.json
    _add(JSON_SUMMARY, "totals", result.test.kind, result.test.status, str(result))
    for issue in result.issues:
        for domain in result.issues[issue]:
            # count the issue once per domain that reported it
            _add(JSON_SUMMARY, "totals", result.test.kind, result.test.status, "errors", issue)

    # extend the times
    _update_time(min, "start_time", json_result)
    _update_time(max, "stop_time", json_result)
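
The _add() and _update_time() helpers called above are not part of the
excerpt. A minimal sketch of what they might look like, assuming
JSON_SUMMARY is a plain nested dict of counters and that start/stop
times are stored as mutually comparable values (both assumptions, not
the framework's actual code):

def _add(table, *keys):
    # Walk (creating as needed) nested dicts for all but the last
    # key, then bump the counter stored under the final key.
    for key in keys[:-1]:
        table = table.setdefault(key, {})
    table[keys[-1]] = table.get(keys[-1], 0) + 1

def _update_time(pick, key, json_result):
    # Widen the summary's time window; pick is min for "start_time"
    # and max for "stop_time".
    value = json_result.get(key)
    if value is not None:
        if key in JSON_SUMMARY:
            JSON_SUMMARY[key] = pick(JSON_SUMMARY[key], value)
        else:
            JSON_SUMMARY[key] = value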
Example #2
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine-grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if baseline and post.Issues.CRASHED.isdisjoint(result.issues):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                        post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = (printer.JsonBuilder(sys.stdout) if args.json
                 else printer.TextBuilder(sys.stdout))
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
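
The baseline filter above keeps a result quiet unless its issue set
overlaps the two baseline markers; isdisjoint() returns True exactly
when the two sets share no member. A standalone illustration (the
issue names here are placeholders, not the framework's real
constants):

BASELINE_MARKERS = {"baseline-failed", "baseline-passed"}

for issues in ({"passed"},                      # same as the baseline
               {"failed", "baseline-passed"}):  # differs from the baseline
    if BASELINE_MARKERS.isdisjoint(issues):
        print(sorted(issues), "-> skipped")
    else:
        print(sorted(issues), "-> printed")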
Example #3
def results(logger, tests, baseline, args, result_stats):

    for test in tests:

        publish.json_status(logger, args, "rebuilding %s" % test.name)

        # If debug logging is enabled this will provide fine-grained
        # per-test timing.
        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test,
                                 args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick,
                                 update=args.update)
            if args.skip:
                if skip.result(logger, args, result):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            # If there is a baseline; limit what is printed to just
            # those that differ.
            if baseline:
                baseline_issue = False
                for issue in result.issues:
                    if "baseline" in issue:
                        baseline_issue = True
                        break
                if not baseline_issue:
                    continue

            b = (printer.JsonBuilder(sys.stdout) if args.json
                 else printer.TextBuilder(sys.stdout))
            printer.build_result(logger, result, baseline, args, args.print, b)

    publish.json_status(logger, args, "finished")
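
Example #3 replaces the explicit marker set of Example #2 with a
substring scan over the issue names. The baseline_issue loop can be
collapsed into a single any() expression; a behavior-equivalent
sketch:

def has_baseline_issue(issues):
    # True when any recorded issue name mentions "baseline".
    return any("baseline" in issue for issue in issues)

print(has_baseline_issue({"passed"}))                     # False
print(has_baseline_issue({"failed", "baseline-failed"}))  # True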
Example #4
def results(logger, tests, baseline, args, result_stats):

    failures = 0
    unresolved = 0
    passed = 0
    nr = 0

    for test in tests:

        nr += 1
        publish.json_status(logger, args,
                            "rebuilding %s (test %d of %d)" % (test.name, nr, len(tests)))

        # If debug logging is enabled this will provide fine-grained
        # per-test timing.

        with logger.debug_time("processing test %s", test.name):

            # Filter out tests that are being ignored?
            ignored, details = ignore.test(logger, args, test)
            if ignored:
                result_stats.add_ignored(test, ignored)
                continue

            # Filter out test results that are being skipped.
            #
            # XXX: In the default case (skip=[UNTESTED]) this should
            # be cheap (does OUTPUT/ exist?).  It isn't, instead a
            # full post-mortem analysis is performed.
            #
            # This is noticeable when printing a static test value
            # such as the test's name: it takes far longer than one
            # would expect.
            result = post.mortem(test, args,
                                 baseline=baseline,
                                 output_directory=test.saved_output_directory,
                                 quick=args.quick)
            if args.update:
                result.save()
            if args.skip:
                if (printer.Print.RESULT in args.print
                        and skip.result(logger, args, result)):
                    result_stats.add_skipped(result)
                    continue
            result_stats.add_result(result)

            if result.resolution in [post.Resolution.PASSED,
                                     post.Resolution.UNTESTED,
                                     post.Resolution.UNSUPPORTED]:
                passed += 1
            elif result.resolution == post.Resolution.UNRESOLVED:
                unresolved += 1
            else:
                failures += 1

            publish.test_files(logger, args, result)
            publish.test_output_files(logger, args, result)
            publish.json_result(logger, args, result)

            if (baseline and test in baseline
                    and not result.issues.crashed()):
                # Since there is a baseline and the test didn't crash
                # limit what is printed to just those where the
                # baseline's result is different.
                #
                # Note that, this skips baseline-different - where the
                # baseline failed for a different reason.
                if {post.Issues.BASELINE_FAILED,
                    post.Issues.BASELINE_PASSED}.isdisjoint(result.issues):
                    continue

            b = (printer.JsonBuilder(sys.stdout) if args.json
                 else printer.TextBuilder(sys.stdout))
            printer.build_result(logger, result, baseline, args, args.print, b)

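        # Note: these run after every test (inside the loop), keeping
        # the published results.json and summary.json current while
        # the run is still in progress.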
        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    publish.json_status(logger, args, "finished")

    # exit code
    if args.exit_ok:
        return 0
    elif unresolved:
        return 125 # 'git bisect' magic for don't know
    elif failures:
        return 1
    else:
        return 0
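
The return value of Example #4 is meant to be used directly as the
process exit status; 125 is the code that "git bisect run" treats as
"this revision cannot be tested". A standalone restatement of the
exit-code policy:

def exit_code(exit_ok, unresolved, failures):
    if exit_ok:
        return 0
    if unresolved:
        return 125  # git bisect run: skip, outcome unknown
    if failures:
        return 1
    return 0

assert exit_code(True, 5, 5) == 0     # --exit-ok forces success
assert exit_code(False, 2, 3) == 125  # unresolved wins over failures
assert exit_code(False, 0, 3) == 1
assert exit_code(False, 0, 0) == 0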