Example 1: sequential runner ("Simple Regression Framework") with directory exclusion, --transitive dependency expansion, and a configurable log line limit
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(description="Simple Regression Framework")
    parser.add_argument("-s",
                        "--strict",
                        action="store_true",
                        help="be strict when parsing test XML files")
    parser.add_argument("-d",
                        "--directory",
                        action="store",
                        metavar="DIR",
                        help="directory to search for test files",
                        default=os.getcwd())
    parser.add_argument(
        "-x",
        "--exclude",
        action="append",
        metavar="DIR",
        help=
        "directory to exclude while searching for test files (multiple -x can be given)",
        default=[])
    parser.add_argument("--brief",
                        action="store_true",
                        help="don't print failure logs at end of test run")
    parser.add_argument("-l",
                        "--list",
                        action="store_true",
                        help="list known tests")
    parser.add_argument("--legacy",
                        action="store_true",
                        help="use legacy 'IsaMakefile' specs")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="print test output")
    parser.add_argument("--limit",
                        action="store",
                        help="set line limit for logs",
                        default=40)
    parser.add_argument(
        "-t",
        "--transitive",
        action="store_true",
        help=
        "also run dependencies transitively (only effective when TESTS specified)"
    )
    parser.add_argument("tests",
                        metavar="TESTS",
                        help="tests to run (defaults to all tests)",
                        nargs="*")
    args = parser.parse_args()

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(xrglob(args.directory, "tests.xml", args.exclude))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # Calculate which tests should be run.
    tests_to_run = []  # type: list[testspec.Test]
    if len(args.tests) == 0:
        tests_to_run = tests
    else:
        desired_names = set(args.tests)  # type: set[str]
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" %
                         (", ".join(sorted(bad_names))))
        tests_to_run = [t for t in tests if t.name in desired_names]

    # Automatically include dependencies.
    if args.transitive:
        tests_to_run = testspec.add_deps(tests_to_run, tests)

    # List test names if requested.
    if args.list:
        for t in tests_to_run:
            print(t.name)
        sys.exit(0)

    # If running at least one test, and psutil is not available, print a warning.
    if len(tests_to_run) > 0 and not PS_UTIL_AVAILABLE:
        print(
            "\n"
            "Warning: 'psutil' module not available. Processes may not be correctly\n"
            "stopped. Run\n"
            "\n"
            "    pip install --user psutil\n"
            "\n"
            "to install.\n"
            "\n")

    # Run the tests.
    print("Running %d test(s)...\n" % len(tests_to_run))
    failed_tests = set()  # type: set[str]
    failed_test_log = []
    for t in tests_to_run:
        if len(t.depends & failed_tests) > 0:  # a dependency failed or was skipped
            print_test_line(t.name, ANSI_YELLOW, "skipped", None, None)
            failed_tests.add(t.name)
            continue

        # Run the test.
        print_test_line_start(t.name, verbose=args.verbose)
        (passed, status, log, time_taken, mem) = run_test(t,
                                                          verbose=args.verbose)

        # Print result.
        if not passed:
            failed_tests.add(t.name)
            failed_test_log.append((t.name, log, time_taken))
            print_test_line_end(t.name,
                                ANSI_RED,
                                "%s *" % status,
                                time_taken,
                                mem,
                                verbose=args.verbose)
        else:
            print_test_line_end(t.name,
                                ANSI_GREEN,
                                status,
                                time_taken,
                                mem,
                                verbose=args.verbose)

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_test_log) > 0:
        print("")
        log_limit = int(args.limit)
        for (failed_test, log, _) in failed_test_log:
            print_line()
            print("TEST FAILURE: %s" % failed_test)
            print("")
            log = log.rstrip("\n") + "\n"
            lines = log.split("\n")
            if len(lines) > 2 * log_limit:
                lines = lines[:log_limit] + ["..."] + lines[-log_limit:]
            print("\n".join(lines))
        print_line()

    # Print summary.
    print(
        ("\n\n" + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n") %
        (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
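
Example 1 finds test specifications with an xrglob helper that is not shown here. A minimal sketch of what such a helper could look like follows; the name, signature and exclusion semantics are assumptions inferred from how it is called above (xrglob(args.directory, "tests.xml", args.exclude)), not the framework's actual implementation.

import fnmatch
import os


def xrglob(base_dir, pattern, exclude_dirs):
    # Hypothetical sketch: recursively find files matching `pattern` under
    # `base_dir`, pruning any directory whose absolute path is in `exclude_dirs`.
    excluded = {os.path.abspath(d) for d in exclude_dirs}
    matches = []
    for root, dirs, files in os.walk(base_dir):
        # Editing `dirs` in place stops os.walk descending into excluded trees.
        dirs[:] = [d for d in dirs
                   if os.path.abspath(os.path.join(root, d)) not in excluded]
        for name in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, name))
    return matches
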
Example 2: parallel runner ("Parallel Regression Framework") with -j job slots, --legacy-status output, and an optional JUnit report
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(
        description="Parallel Regression Framework")
    parser.add_argument("-s",
                        "--strict",
                        action="store_true",
                        help="be strict when parsing test XML files")
    parser.add_argument("-d",
                        "--directory",
                        action="store",
                        metavar="DIR",
                        help="directory to search for test files",
                        default=os.getcwd())
    parser.add_argument("--brief",
                        action="store_true",
                        help="don't print failure logs at end of test run")
    parser.add_argument("-j",
                        "--jobs",
                        type=int,
                        default=1,
                        help="Number of tests to run in parallel")
    parser.add_argument("-l",
                        "--list",
                        action="store_true",
                        help="list known tests")
    parser.add_argument("--legacy",
                        action="store_true",
                        help="use legacy 'IsaMakefile' specs")
    parser.add_argument("--legacy-status",
                        action="store_true",
                        help="emulate legacy (sequential code) status lines")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="print test output")
    parser.add_argument("--junit-report",
                        metavar="FILE",
                        help="write JUnit-style test report")
    parser.add_argument("tests",
                        metavar="TESTS",
                        help="tests to run (defaults to all tests)",
                        nargs="*")
    args = parser.parse_args()

    if args.jobs < 1:
        parser.error("Number of parallel jobs must be at least 1")

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(rglob(args.directory, "tests.xml"))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # List test names if requested.
    if args.list:
        for t in tests:
            print(t.name)
        sys.exit(0)

    # Calculate which tests should be run.
    tests_to_run = []
    if len(args.tests) == 0:
        tests_to_run = tests
    else:
        desired_names = set(args.tests)
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" %
                         (", ".join(sorted(bad_names))))
        tests_to_run = [t for t in tests if t.name in desired_names]

    # If running at least one test, and psutil is not available, print a warning.
    if len(tests_to_run) > 0 and not PS_UTIL_AVAILABLE:
        print(
            "\n"
            "Warning: 'psutil' module not available. Processes may not be correctly\n"
            "stopped. Run\n"
            "\n"
            "    pip install --user psutil\n"
            "\n"
            "to install.\n"
            "\n")

    # Run the tests.
    print("Running %d test(s)..." % len(tests_to_run))
    failed_tests = set()
    passed_tests = set()
    test_results = {}

    # Use a simple list to store the pending queue. We track the dependencies separately.
    tests_queue = tests_to_run[:]
    # Current jobs.
    current_jobs = {}
    # Output status.
    status_queue = Queue.Queue()

    # If run from a tty and -v is off, we also track
    # current jobs on the bottom line of the tty.
    # We cache this status line to help us wipe it later.
    tty_status_line = [""]

    def wipe_tty_status():
        if tty_status_line[0]:
            print(" " * len(tty_status_line[0]) + "\r", end="")
            sys.stdout.flush()
            tty_status_line[0] = ""

    while tests_queue or current_jobs:
        # Update status line with pending jobs.
        if current_jobs and sys.stdout.isatty() and not args.verbose:
            tty_status_line[0] = "Running: " + ", ".join(
                sorted(current_jobs.keys()))
            print(tty_status_line[0] + "\r", end="")
            sys.stdout.flush()

        # Check if we have a job slot.
        if len(current_jobs) < args.jobs:
            # Find the first non-blocked test and handle it.
            for i, t in enumerate(tests_queue):
                # Leave out dependencies that were excluded at the command line.
                real_depends = t.depends & set(u.name for u in tests_to_run)
                # Non-blocked and open. Start it.
                if real_depends.issubset(passed_tests):
                    test_thread = threading.Thread(target=run_test,
                                                   name=t.name,
                                                   args=(t, status_queue,
                                                         args.verbose))
                    wipe_tty_status()
                    print_test_line_start(t.name, args.legacy_status)
                    test_thread.start()
                    current_jobs[t.name] = test_thread
                    del tests_queue[i]
                    break
                # Non-blocked but depends on a failed test. Remove it.
                if len(real_depends & failed_tests) > 0:
                    wipe_tty_status()
                    print_test_line(t.name, ANSI_YELLOW, "skipped", None, None,
                                    args.legacy_status)
                    failed_tests.add(t.name)
                    del tests_queue[i]
                    break

        # Wait for jobs to complete.
        try:
            while True:
                info = status_queue.get(block=True,
                                        timeout=0.1337)  # Built-in pause
                name = info['name']
                del current_jobs[name]

                status, log, time_taken, mem = info['status'], info[
                    'output'], info['real_time'], info['mem_usage']
                test_results[name] = info

                # Print result.
                wipe_tty_status()
                if status != 'pass':
                    failed_tests.add(name)
                    print_test_line(name, ANSI_RED, "%s *" % status,
                                    time_taken, mem, args.legacy_status)
                else:
                    passed_tests.add(name)
                    print_test_line(name, ANSI_GREEN, status, time_taken, mem,
                                    args.legacy_status)
        except Queue.Empty:
            pass
    wipe_tty_status()

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_tests) > 0:
        LINE_LIMIT = 40

        def print_line():
            print("-" * 72)

        print("")
        # Sort failed_tests according to tests_to_run
        for t in tests_to_run:
            if t.name not in failed_tests:
                continue
            if t.name not in test_results:
                continue

            print_line()
            print("TEST FAILURE: %s" % t.name)
            print("")
            log = test_results[t.name]['output'].rstrip("\n") + "\n"
            lines = log.split("\n")
            if len(lines) > LINE_LIMIT:
                lines = ["..."] + lines[-LINE_LIMIT:]
            print("\n".join(lines))
        print_line()

    # Print JUnit-style test report.
    # reference: https://github.com/notnoop/hudson-tools/blob/master/toJunitXML/sample-junit.xml
    if args.junit_report is not None:
        testsuite = ET.Element("testsuite")
        for t in tests_to_run:
            if t.name not in test_results:
                # test was skipped
                testcase = ET.SubElement(testsuite,
                                         "testcase",
                                         classname="",
                                         name=t.name,
                                         time="0")
                ET.SubElement(
                    testcase, "error",
                    type="error").text = ("Failed dependencies: " +
                                          ', '.join(t.depends & failed_tests))
            else:
                info = test_results[t.name]
                testcase = ET.SubElement(testsuite,
                                         "testcase",
                                         classname="",
                                         name=t.name,
                                         time='%f' %
                                         info['real_time'].total_seconds())
                if info['status'] == "FAILED":
                    ET.SubElement(testcase, "failure",
                                  type="failure").text = info['output']
                elif info['status'] == "TIMEOUT":
                    ET.SubElement(testcase, "error",
                                  type="timeout").text = info['output']
                else:
                    if not args.verbose:
                        ET.SubElement(testcase,
                                      "system-out").text = info['output']
        ET.ElementTree(testsuite).write(args.junit_report)

    # Print summary.
    print(
        ("\n\n" + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n") %
        (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
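
In Example 2 the workers are started as threading.Thread(target=run_test, args=(t, status_queue, args.verbose)), and the main loop expects each finished worker to put a dict with 'name', 'status', 'output', 'real_time' and 'mem_usage' keys on status_queue, where 'real_time' supports .total_seconds() (presumably a datetime.timedelta). A stub worker honouring that contract, useful for exercising the scheduling loop without running real tests, might look like the sketch below; the `command` attribute and the shell invocation are assumptions, not the framework's real run_test.

import datetime
import subprocess


def run_test_stub(test, status_queue, verbose=False):
    # Hypothetical stand-in for run_test: run the test's (assumed) shell
    # command and report a result dict in the shape the main loop consumes.
    start = datetime.datetime.now()
    try:
        output = subprocess.check_output(test.command, shell=True,
                                         stderr=subprocess.STDOUT)
        status = 'pass'
    except subprocess.CalledProcessError as exc:
        output = exc.output
        status = 'FAILED'
    status_queue.put({
        'name': test.name,
        'status': status,
        'output': output.decode('utf-8', 'replace'),
        'real_time': datetime.datetime.now() - start,
        'mem_usage': None,  # the real framework reports peak memory via psutil
    })
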
Example 3: parallel runner adding --fail-fast (via a shared kill switch), per-test exclusion (-x), stuck-process timeouts, and a grace period before killing processes
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(description="Parallel Regression Framework")
    parser.add_argument("-s", "--strict", action="store_true",
            help="be strict when parsing test XML files")
    parser.add_argument("-d", "--directory", action="store",
            metavar="DIR", help="directory to search for test files",
            default=os.getcwd())
    parser.add_argument("--brief", action="store_true",
            help="don't print failure logs at end of test run")
    parser.add_argument("-f", "--fail-fast", action="store_true",
            help="exit once the first failure is detected")
    parser.add_argument("-j", "--jobs", type=int, default=1,
            help="Number of tests to run in parallel")
    parser.add_argument("-l", "--list", action="store_true",
            help="list known tests")
    parser.add_argument("--legacy", action="store_true",
            help="use legacy 'IsaMakefile' specs")
    # --legacy-status used by top-level regression-v2 script
    parser.add_argument("--legacy-status", action="store_true",
            help="emulate legacy (sequential code) status lines")
    parser.add_argument("-x", "--exclude", action="append", metavar="TEST", default=[],
            help="exclude tests (one -x per test)")
    parser.add_argument("-v", "--verbose", action="store_true",
            help="print test output")
    parser.add_argument("--junit-report", metavar="FILE",
            help="write JUnit-style test report")
    parser.add_argument("--stuck-timeout", type=int, default=600, metavar='N',
            help="timeout tests if not using CPU for N seconds (default: 600)")
    parser.add_argument("--grace-period", type=float, default=5, metavar='N',
            help="notify processes N seconds before killing them (default: 5)")
    parser.add_argument("tests", metavar="TESTS",
            help="tests to run (defaults to all tests)",
            nargs="*")
    args = parser.parse_args()

    if args.jobs < 1:
        parser.error("Number of parallel jobs must be at least 1")

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(rglob(args.directory, "tests.xml"))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # List test names if requested.
    if args.list:
        for t in tests:
            print(t.name)
        sys.exit(0)

    # Calculate which tests should be run.
    tests_to_run = []
    if len(args.tests) == 0:
        tests_to_run = tests
    else:
        desired_names = set(args.tests)
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" % (", ".join(sorted(bad_names))))
        tests_to_run = [t for t in tests if t.name in desired_names]

    args.exclude = set(args.exclude)
    bad_names = args.exclude - set(t.name for t in tests)
    if bad_names:
        parser.error("Unknown test names: %s" % (", ".join(sorted(bad_names))))
    tests_to_run = [t for t in tests_to_run if t.name not in args.exclude]

    # Run the tests.
    print("Running %d test(s)..." % len(tests_to_run))
    failed_tests = set()
    passed_tests = set()
    test_results = {}

    # Use a simple list to store the pending queue. We track the dependencies separately.
    tests_queue = tests_to_run[:]
    # Current jobs.
    current_jobs = {}
    # Newly finished jobs.
    status_queue = Queue.Queue()

    # If run from a tty and -v is off, we also track
    # current jobs on the bottom line of the tty.
    # We cache this status line to help us wipe it later.
    tty_status_line = [""]
    def wipe_tty_status():
        if tty_status_line[0]:
            print(" " * len(tty_status_line[0]) + "\r", end="")
            sys.stdout.flush()
            tty_status_line[0] = ""

    # Handle --fail-fast
    kill_switch = threading.Event()

    while tests_queue or current_jobs:
        # Update status line with pending jobs.
        if current_jobs and sys.stdout.isatty() and not args.verbose:
            tty_status_line[0] = "Running: " + ", ".join(sorted(current_jobs.keys()))
            print(tty_status_line[0] + "\r", end="")
            sys.stdout.flush()

        # Check if we have a job slot.
        if len(current_jobs) < args.jobs:
            # Find the first non-blocked test and handle it.
            for i, t in enumerate(tests_queue):
                # Leave out dependencies that were excluded at the command line.
                real_depends = t.depends & set(u.name for u in tests_to_run)
                # Non-blocked but depends on a failed test. Remove it.
                if (len(real_depends & failed_tests) > 0
                    # --fail-fast triggered, fail all subsequent tests
                    or kill_switch.is_set()):

                    wipe_tty_status()
                    print_test_line(t.name, ANSI_YELLOW, SKIPPED, legacy=args.legacy_status)
                    failed_tests.add(t.name)
                    del tests_queue[i]
                    break
                # Non-blocked and open. Start it.
                if real_depends.issubset(passed_tests):
                    test_thread = threading.Thread(target=run_test, name=t.name,
                                                   args=(t, status_queue, kill_switch,
                                                         args.verbose, args.stuck_timeout, args.grace_period))
                    wipe_tty_status()
                    print_test_line_start(t.name, args.legacy_status)
                    test_thread.start()
                    current_jobs[t.name] = test_thread
                    del tests_queue[i]
                    break

        # Wait for jobs to complete.
        try:
            while True:
                info = status_queue.get(block=True, timeout=0.1337) # Built-in pause
                name, status = info['name'], info['status']

                test_results[name] = info
                del current_jobs[name]

                # Print result.
                wipe_tty_status()
                if status is PASSED:
                    passed_tests.add(name)
                    colour = ANSI_GREEN
                elif status is CANCELLED:
                    failed_tests.add(name)
                    colour = ANSI_YELLOW
                else:
                    failed_tests.add(name)
                    colour = ANSI_RED
                print_test_line(name, colour, status,
                                real_time=info['real_time'], cpu_time=info['cpu_time'], mem=info['mem_usage'],
                                legacy=args.legacy_status)
                if args.fail_fast and status != PASSED:
                    # Notify current threads and future tests
                    kill_switch.set()
        except Queue.Empty:
            pass
    wipe_tty_status()

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_tests) > 0:
        LINE_LIMIT = 40
        def print_line():
            print("-" * 72)
        print("")
        # Sort failed_tests according to tests_to_run
        for t in tests_to_run:
            if t.name not in failed_tests:
                continue
            if t.name not in test_results:
                continue

            print_line()
            print("TEST %s: %s" % (status_name[test_results[t.name]['status']], t.name))
            print("")
            output = test_results[t.name]['output'].rstrip("\n")
            if output:
                lines = output.split("\n") + ['']
            else:
                lines = ['(no output)']
            if len(lines) > LINE_LIMIT:
                lines = ["..."] + lines[-LINE_LIMIT:]
            print("\n".join(lines))
        print_line()

    # Print JUnit-style test report.
    # reference: https://github.com/notnoop/hudson-tools/blob/master/toJunitXML/sample-junit.xml
    if args.junit_report is not None:
        testsuite = ET.Element("testsuite")
        for t in tests_to_run:
            if t.name not in test_results:
                # test was skipped
                testcase = ET.SubElement(testsuite, "testcase",
                                         classname="", name=t.name, time="0")
                if t.depends & failed_tests:
                    ET.SubElement(testcase, "error", type="error").text = (
                        "Failed dependencies: " + ', '.join(t.depends & failed_tests))
                else:
                    ET.SubElement(testcase, "error", type="error").text = "Cancelled"
            else:
                info = test_results[t.name]
                testcase = ET.SubElement(testsuite, "testcase",
                                         classname="", name=t.name, time='%f' % info['real_time'].total_seconds())
                if info['status'] is PASSED:
                    if not args.verbose:
                        ET.SubElement(testcase, "system-out").text = info['output']
                elif info['status'] is FAILED:
                    ET.SubElement(testcase, "failure", type="failure").text = info['output']
                elif info['status'] in (TIMEOUT, CPU_TIMEOUT):
                    ET.SubElement(testcase, "error", type="timeout").text = info['output']
                elif info['status'] is STUCK:
                    ET.SubElement(testcase, "error", type="stuck").text = info['output']
                elif info['status'] is CANCELLED:
                    ET.SubElement(testcase, "error", type="cancelled").text = info['output']
                elif info['status'] is ERROR:
                    ET.SubElement(testcase, "error", type="error").text = info['output']
                else:
                    warnings.warn("Unknown status code: {}".format(info['status']))
                    ET.SubElement(testcase, "error", type="unknown").text = info['output']

        ET.ElementTree(testsuite).write(args.junit_report)

    # Print summary.
    print(("\n\n"
            + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
            % (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        if kill_switch.is_set():
            print("Exiting early due to --fail-fast.")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
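
Example 3 threads a shared threading.Event (kill_switch) together with --stuck-timeout and --grace-period into every worker. The workers are presumably expected to watch that event and, once it is set, ask their child process to stop, then kill it after the grace period and report a cancelled result. The helper below is a hedged sketch of that cancellation loop using standard subprocess calls and invented names; it returns plain strings where the real code uses its PASSED/FAILED/CANCELLED status constants.

import time


def wait_or_cancel(process, kill_switch, grace_period, poll_interval=0.5):
    # Hypothetical helper: wait for a subprocess.Popen `process`, but honour
    # the shared kill switch used for --fail-fast.
    while process.poll() is None:
        if kill_switch.is_set():
            process.terminate()  # polite notification first (SIGTERM)
            deadline = time.time() + grace_period
            while process.poll() is None and time.time() < deadline:
                time.sleep(poll_interval)
            if process.poll() is None:
                process.kill()   # force kill once the grace period expires
            process.wait()
            return 'CANCELLED'
        time.sleep(poll_interval)
    return 'pass' if process.returncode == 0 else 'FAILED'
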
Example 4: parallel runner adding --no-dependencies, the RUN_TESTS_DEFAULT environment override, and transitive dependency resolution
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(
        description="Parallel Regression Framework",
        epilog=
        "RUN_TESTS_DEFAULT can be used to overwrite the default set of tests")
    parser.add_argument("-s",
                        "--strict",
                        action="store_true",
                        help="be strict when parsing test XML files")
    parser.add_argument("-d",
                        "--directory",
                        action="store",
                        metavar="DIR",
                        help="directory to search for test files",
                        default=os.getcwd())
    parser.add_argument("--brief",
                        action="store_true",
                        help="don't print failure logs at end of test run")
    parser.add_argument("-f",
                        "--fail-fast",
                        action="store_true",
                        help="exit once the first failure is detected")
    parser.add_argument("-j",
                        "--jobs",
                        type=int,
                        default=1,
                        help="Number of tests to run in parallel")
    parser.add_argument("-l",
                        "--list",
                        action="store_true",
                        help="list known tests")
    parser.add_argument(
        "--no-dependencies",
        action="store_true",
        help="don't check for dependencies when running specific tests")
    parser.add_argument("--legacy",
                        action="store_true",
                        help="use legacy 'IsaMakefile' specs")
    # --legacy-status used by top-level regression-v2 script
    parser.add_argument("--legacy-status",
                        action="store_true",
                        help="emulate legacy (sequential code) status lines")
    parser.add_argument("-x",
                        "--exclude",
                        action="append",
                        metavar="TEST",
                        default=[],
                        help="exclude tests (one -x per test)")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="print test output")
    parser.add_argument("--junit-report",
                        metavar="FILE",
                        help="write JUnit-style test report")
    parser.add_argument(
        "--stuck-timeout",
        type=int,
        default=600,
        metavar='N',
        help="timeout tests if not using CPU for N seconds (default: 600)")
    parser.add_argument(
        "--grace-period",
        type=float,
        default=5,
        metavar='N',
        help="notify processes N seconds before killing them (default: 5)")
    parser.add_argument("tests",
                        metavar="TESTS",
                        help="tests to run (defaults to all tests)",
                        nargs="*")
    args = parser.parse_args()

    if args.jobs < 1:
        parser.error("Number of parallel jobs must be at least 1")

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(rglob(args.directory, "tests.xml"))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # List test names if requested.
    if args.list:
        for t in tests:
            print(t.name)
        sys.exit(0)

    # Calculate which tests should be run.
    tests_to_run = []
    if len(args.tests) == 0 and not os.environ.get('RUN_TESTS_DEFAULT'):
        tests_to_run = tests
    else:
        # TESTS given on the command line take precedence; the `or` short-circuits,
        # so RUN_TESTS_DEFAULT is only consulted when no TESTS were given.
        desired_names = set(args.tests) or set(
            os.environ.get('RUN_TESTS_DEFAULT').split())
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" %
                         (", ".join(sorted(bad_names))))
        # Given a list of names return the corresponding set of Test objects.
        get_tests = lambda x: {t for t in tests if t.name in x}

        # Given a list/set of Tests return a superset that includes all dependencies.
        def get_deps(x):
            x.update({t for w in x for t in get_deps(get_tests(w.depends))})
            return x

        tests_to_run_set = get_tests(desired_names)
        # Are we skipping dependencies? if not, add them.
        if not args.no_dependencies:
            tests_to_run_set = get_deps(tests_to_run_set)
        # Preserve the order of the original set of Tests.
        tests_to_run = [t for t in tests if t in tests_to_run_set]

    args.exclude = set(args.exclude)
    bad_names = args.exclude - set(t.name for t in tests)
    if bad_names:
        parser.error("Unknown test names: %s" % (", ".join(sorted(bad_names))))
    tests_to_run = [t for t in tests_to_run if t.name not in args.exclude]

    # Run the tests.
    print("Running %d test(s)..." % len(tests_to_run))
    failed_tests = set()
    passed_tests = set()
    test_results = {}

    # Use a simple list to store the pending queue. We track the dependencies separately.
    tests_queue = tests_to_run[:]
    # Current jobs.
    current_jobs = {}
    # Newly finished jobs.
    status_queue = Queue.Queue()

    # If run from a tty and -v is off, we also track
    # current jobs on the bottom line of the tty.
    # We cache this status line to help us wipe it later.
    tty_status_line = [""]

    def wipe_tty_status():
        if tty_status_line[0]:
            print(" " * len(tty_status_line[0]) + "\r", end="")
            sys.stdout.flush()
            tty_status_line[0] = ""

    # Handle --fail-fast
    kill_switch = threading.Event()

    while tests_queue or current_jobs:
        # Update status line with pending jobs.
        if current_jobs and sys.stdout.isatty() and not args.verbose:
            tty_status_line[0] = "Running: " + ", ".join(
                sorted(current_jobs.keys()))
            print(tty_status_line[0] + "\r", end="")
            sys.stdout.flush()

        # Check if we have a job slot.
        if len(current_jobs) < args.jobs:
            # Find the first non-blocked test and handle it.
            for i, t in enumerate(tests_queue):
                # Leave out dependencies that were excluded at the command line.
                real_depends = t.depends & set(u.name for u in tests_to_run)
                # Non-blocked but depends on a failed test. Remove it.
                if (len(real_depends & failed_tests) > 0
                        # --fail-fast triggered, fail all subsequent tests
                        or kill_switch.is_set()):

                    wipe_tty_status()
                    print_test_line(t.name,
                                    ANSI_YELLOW,
                                    SKIPPED,
                                    legacy=args.legacy_status)
                    failed_tests.add(t.name)
                    del tests_queue[i]
                    break
                # Non-blocked and open. Start it.
                if real_depends.issubset(passed_tests):
                    test_thread = threading.Thread(
                        target=run_test,
                        name=t.name,
                        args=(t, status_queue, kill_switch, args.verbose,
                              args.stuck_timeout, args.grace_period))
                    wipe_tty_status()
                    print_test_line_start(t.name, args.legacy_status)
                    test_thread.start()
                    current_jobs[t.name] = test_thread
                    del tests_queue[i]
                    break

        # Wait for jobs to complete.
        try:
            while True:
                info = status_queue.get(block=True,
                                        timeout=0.1337)  # Built-in pause
                name, status = info['name'], info['status']

                test_results[name] = info
                del current_jobs[name]

                # Print result.
                wipe_tty_status()
                if status is PASSED:
                    passed_tests.add(name)
                    colour = ANSI_GREEN
                elif status is CANCELLED:
                    failed_tests.add(name)
                    colour = ANSI_YELLOW
                else:
                    failed_tests.add(name)
                    colour = ANSI_RED
                print_test_line(name,
                                colour,
                                status,
                                real_time=info['real_time'],
                                cpu_time=info['cpu_time'],
                                mem=info['mem_usage'],
                                legacy=args.legacy_status)
                if args.fail_fast and status != PASSED:
                    # Notify current threads and future tests
                    kill_switch.set()
        except Queue.Empty:
            pass
    wipe_tty_status()

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_tests) > 0:
        LINE_LIMIT = 40

        def print_line():
            print("-" * 72)

        print("")
        # Sort failed_tests according to tests_to_run
        for t in tests_to_run:
            if t.name not in failed_tests:
                continue
            if t.name not in test_results:
                continue

            print_line()
            print("TEST %s: %s" %
                  (status_name[test_results[t.name]['status']], t.name))
            print("")
            output = test_results[t.name]['output'].rstrip("\n")
            if output:
                lines = output.split("\n") + ['']
            else:
                lines = ['(no output)']
            if len(lines) > LINE_LIMIT:
                lines = ["..."] + lines[-LINE_LIMIT:]
            print("\n".join(lines))
        print_line()

    # Print JUnit-style test report.
    # reference: https://github.com/notnoop/hudson-tools/blob/master/toJunitXML/sample-junit.xml
    if args.junit_report is not None:
        testsuite = ET.Element("testsuite")
        for t in tests_to_run:
            if t.name not in test_results:
                # test was skipped
                testcase = ET.SubElement(testsuite,
                                         "testcase",
                                         classname="",
                                         name=t.name,
                                         time="0")
                if t.depends & failed_tests:
                    ET.SubElement(testcase, "error", type="error").text = (
                        "Failed dependencies: " +
                        ', '.join(t.depends & failed_tests))
                else:
                    ET.SubElement(testcase, "error",
                                  type="error").text = "Cancelled"
            else:
                info = test_results[t.name]
                testcase = ET.SubElement(testsuite,
                                         "testcase",
                                         classname="",
                                         name=t.name,
                                         time='%f' %
                                         info['real_time'].total_seconds())
                if info['status'] is PASSED:
                    if not args.verbose:
                        ET.SubElement(testcase,
                                      "system-out").text = info['output']
                elif info['status'] is FAILED:
                    ET.SubElement(testcase, "failure",
                                  type="failure").text = info['output']
                elif info['status'] in (TIMEOUT, CPU_TIMEOUT):
                    ET.SubElement(testcase, "error",
                                  type="timeout").text = info['output']
                elif info['status'] is STUCK:
                    ET.SubElement(testcase, "error",
                                  type="stuck").text = info['output']
                elif info['status'] is CANCELLED:
                    ET.SubElement(testcase, "error",
                                  type="cancelled").text = info['output']
                elif info['status'] is ERROR:
                    ET.SubElement(testcase, "error",
                                  type="error").text = info['output']
                else:
                    warnings.warn("Unknown status code: {}".format(
                        info['status']))
                    ET.SubElement(testcase, "error",
                                  type="unknown").text = info['output']

        ET.ElementTree(testsuite).write(args.junit_report)

    # Print summary.
    print(
        ("\n\n" + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n") %
        (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        if kill_switch.is_set():
            print("Exiting early due to --fail-fast.")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
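
All of these examples assume a testspec.Test object exposing at least a name and a depends set of test names. The transitive-closure logic from Example 4 (get_tests/get_deps) can be exercised on a toy stand-in for that structure, as in the sketch below; the namedtuple is purely illustrative and is not the framework's real Test class.

import collections

# Illustrative stand-in for testspec.Test: a name plus the names it depends on.
Test = collections.namedtuple('Test', ['name', 'depends'])

tests = [
    Test('lib', frozenset()),
    Test('proof', frozenset({'lib'})),
    Test('bench', frozenset({'proof'})),
]


def get_tests(names):
    return {t for t in tests if t.name in names}


def get_deps(ts):
    # Expand a set of Tests with all of their (transitive) dependencies.
    ts.update({t for w in ts for t in get_deps(get_tests(w.depends))})
    return ts


selected = get_deps(get_tests({'bench'}))
print(sorted(t.name for t in selected))  # prints ['bench', 'lib', 'proof']
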
Example 5: minimal sequential runner (pass/fail only, no timing or memory reporting)
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(description="Simple Regression Framework")
    parser.add_argument("-s", "--strict", action="store_true",
            help="be strict when parsing test XML files")
    parser.add_argument("-d", "--directory", action="store",
            metavar="DIR", help="directory to search for test files",
            default=os.getcwd())
    parser.add_argument("--brief", action="store_true",
            help="don't print failure logs at end of test run")
    parser.add_argument("-l", "--list", action="store_true",
            help="list known tests")
    parser.add_argument("--legacy", action="store_true",
            help="use legacy 'IsaMakefile' specs")
    parser.add_argument("-v", "--verbose", action="store_true",
            help="print test output")
    parser.add_argument("tests", metavar="TESTS",
            help="tests to run (defaults to all tests)",
            nargs="*")
    args = parser.parse_args()

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(rglob(args.directory, "tests.xml"))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # List test names if requested.
    if args.list:
        for t in tests:
            print(t.name)
        sys.exit(0)

    # Calculate which tests should be run.
    tests_to_run = []
    if len(args.tests) == 0:
        tests_to_run = tests
    else:
        desired_names = set(args.tests)
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" % (", ".join(sorted(bad_names))))
        tests_to_run = [t for t in tests if t.name in desired_names]

    # Run the tests.
    print("Running %d test(s)...\n" % len(tests_to_run))
    failed_tests = set()
    failed_test_log = []
    for t in tests_to_run:
        if len(t.depends & failed_tests) > 0:
            print_test_line(t.name, ANSI_YELLOW, "skipped")
            failed_tests.add(t.name)
            continue

        # Run the test.
        print_test_line_start(t.name)
        (passed, log) = run_test(t, verbose=args.verbose)

        # Print result.
        if not passed:
            failed_tests.add(t.name)
            failed_test_log.append((t.name, log))
            print_test_line_end(t.name, ANSI_RED, "FAILED *")
        else:
            print_test_line_end(t.name, ANSI_GREEN, "pass")

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_test_log) > 0:
        LINE_LIMIT = 40
        def print_line():
            print("-" * 72)
        print("")
        for (failed_test, log) in failed_test_log:
            print_line()
            print("TEST FAILURE: %s" % failed_test)
            print("")
            log = log.rstrip("\n") + "\n"
            lines = log.split("\n")
            if len(lines) > LINE_LIMIT:
                lines = ["..."] + lines[-LINE_LIMIT:]
            print("\n".join(lines))
        print_line()

    # Print summary.
    print(("\n\n"
            + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
            % (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
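
Example 5 prints each test name first and then completes the same line with a coloured status via print_test_line_start / print_test_line_end. A plausible minimal version matching Example 5's call signatures is sketched below; the column width, colour codes and output_color behaviour are assumptions, not the script's real helpers.

from __future__ import print_function

import sys

ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_WHITE = "\033[37m"
ANSI_RESET = "\033[0m"


def output_color(color, text):
    # Only colourise when writing to a terminal.
    return color + text + ANSI_RESET if sys.stdout.isatty() else text


def print_test_line_start(name):
    # Start the line; leave the cursor in place so the status can follow.
    print("  running %-40s" % (name + " ..."), end="")
    sys.stdout.flush()


def print_test_line_end(name, color, status):
    # Finish the line begun by print_test_line_start.
    print(output_color(color, status))
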
Example 6: sequential runner with directory exclusion, timing and memory reporting, and a configurable log line limit (--limit)
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(description="Simple Regression Framework")
    parser.add_argument("-s", "--strict", action="store_true",
            help="be strict when parsing test XML files")
    parser.add_argument("-d", "--directory", action="store",
            metavar="DIR", help="directory to search for test files",
            default=os.getcwd())
    parser.add_argument("-x", "--exclude", action="append",
            metavar="DIR", help="directory to exclude while searching for test files (multiple -x can be given)",
            default=[])
    parser.add_argument("--brief", action="store_true",
            help="don't print failure logs at end of test run")
    parser.add_argument("-l", "--list", action="store_true",
            help="list known tests")
    parser.add_argument("--legacy", action="store_true",
            help="use legacy 'IsaMakefile' specs")
    parser.add_argument("-v", "--verbose", action="store_true",
            help="print test output")
    parser.add_argument("--limit", action="store",
            help="set line limit for logs", default=40)
    parser.add_argument("tests", metavar="TESTS",
            help="tests to run (defaults to all tests)",
            nargs="*")
    args = parser.parse_args()

    # Search for test files:
    if not args.legacy:
        test_xml = sorted(xrglob(args.directory, "tests.xml", args.exclude))
        tests = testspec.parse_test_files(test_xml, strict=args.strict)
    else:
        # Fetch legacy tests.
        tests = testspec.legacy_testspec(args.directory)

    # List test names if requested.
    if args.list:
        for t in tests:
            print(t.name)
        sys.exit(0)

    # Calculate which tests should be run.
    tests_to_run = []
    if len(args.tests) == 0:
        tests_to_run = tests
    else:
        desired_names = set(args.tests)
        bad_names = desired_names - set([t.name for t in tests])
        if len(bad_names) > 0:
            parser.error("Unknown test names: %s" % (", ".join(sorted(bad_names))))
        tests_to_run = [t for t in tests if t.name in desired_names]

    # If running at least one test, and psutil is not available, print a warning.
    if len(tests_to_run) > 0 and not PS_UTIL_AVAILABLE:
        print("\n"
              "Warning: 'psutil' module not available. Processes may not be correctly\n"
              "stopped. Run\n"
              "\n"
              "    pip install --user psutil\n"
              "\n"
              "to install.\n"
              "\n")

    # Run the tests.
    print("Running %d test(s)...\n" % len(tests_to_run))
    failed_tests = set()
    failed_test_log = []
    for t in tests_to_run:
        if len(t.depends & failed_tests) > 0:
            print_test_line(t.name, ANSI_YELLOW, "skipped", None, None)
            failed_tests.add(t.name)
            continue

        # Run the test.
        print_test_line_start(t.name, verbose=args.verbose)
        (passed, status, log, time_taken, mem) = run_test(t, verbose=args.verbose)

        # Print result.
        if not passed:
            failed_tests.add(t.name)
            failed_test_log.append((t.name, log, time_taken))
            print_test_line_end(t.name, ANSI_RED, "%s *" % status, time_taken, mem, verbose=args.verbose)
        else:
            print_test_line_end(t.name, ANSI_GREEN, status, time_taken, mem, verbose=args.verbose)

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_test_log) > 0:
        print("")
        log_limit = int(args.limit)
        for (failed_test, log, _) in failed_test_log:
            print_line()
            print("TEST FAILURE: %s" % failed_test)
            print("")
            log = log.rstrip("\n") + "\n"
            lines = log.split("\n")
            if len(lines) > 2 * log_limit:
                lines = lines[:log_limit] + ["..."] + lines[-log_limit:]
            print("\n".join(lines))
        print_line()

    # Print summary.
    print(("\n\n"
            + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
            % (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)
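
Examples 1 and 6 keep both the head and the tail of long failure logs around a "..." marker (the other variants keep only the tail). That truncation can be factored into a small helper; the function below is an illustrative refactoring of the logic above, not part of the original script.

def truncate_log(log, limit):
    # Keep the first and last `limit` lines, separated by "...",
    # whenever the log is more than twice that long.
    lines = (log.rstrip("\n") + "\n").split("\n")
    if len(lines) > 2 * limit:
        lines = lines[:limit] + ["..."] + lines[-limit:]
    return "\n".join(lines)


# Usage: a 100-line log with the default --limit of 40 keeps 40 + "..." + 40 lines.
sample = "\n".join("line %d" % i for i in range(100))
print(truncate_log(sample, 40))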