Example #1
# Standard-library imports this snippet relies on. Project-local helpers
# (runtests_parser, find_vw_binary, find_spanning_tree_binary, find_runtests_file,
# find_to_flatbuf_binary, convert_tests_for_flatbuffers, run_command_line_test,
# clean_dirty, do_dirty_check, calculate_test_to_run_explicitly, print_colored_diff,
# Color, NoColor, Completion, Result) are assumed to be defined elsewhere in the
# surrounding run_tests module.
import argparse
import json
import os
import socket
import sys
import traceback
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path


def main():

    working_dir = Path.home().joinpath(".vw_runtests_working_dir")
    test_ref_dir = Path(os.path.dirname(os.path.abspath(__file__)))

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-t', "--test", type=int,
                        action='append', nargs='+', help="Run specific tests and ignore all others")
    parser.add_argument('-E', "--epsilon", type=float, default=1e-4,
                        help="Tolerance used when comparing floats. Only used if --fuzzy_compare is also supplied")
    parser.add_argument('-e', "--exit_first_fail", action='store_true',
                        help="If supplied, will exit after the first failure")
    parser.add_argument('-o', "--overwrite", action='store_true',
                        help="If test output differs from the reference file, overwrite the contents")
    parser.add_argument('-f', "--fuzzy_compare", action='store_true',
                        help="Allow for some tolerance when comparing floats")
    parser.add_argument("--ignore_dirty", action='store_true',
                        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to skip this check.")
    parser.add_argument("--clean_dirty", action='store_true',
                        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to remove those files.")
    parser.add_argument("--working_dir", default=working_dir,
                        help="Directory to save test outputs to")
    parser.add_argument("--ref_dir", default=test_ref_dir,
                        help="Directory to read test input files from")
    parser.add_argument('-j', "--jobs", type=int, default=4,
                        help="Number of tests to run in parallel")
    parser.add_argument(
        '--vw_bin_path', help="Specify VW binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument('--spanning_tree_bin_path',
                        help="Specify spanning tree binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument("--test_spec", type=str,
                        help="Optional. If passed the given JSON test spec will be used, " +
                        "otherwise a test spec will be autogenerated from the RunTests test definitions")
    parser.add_argument('--no_color', action='store_true',
                        help="Don't print color ANSI escape codes")
    parser.add_argument('--for_flatbuffers', action='store_true', help='Transform all of the test inputs into flatbuffer format and run tests')
    parser.add_argument('--to_flatbuff_path', help="Specify to_flatbuff binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument('--include_flatbuffers', action='store_true', help="Don't skip the explicit flatbuffer tests from default run_tests run")
    args = parser.parse_args()

    if args.for_flatbuffers and args.working_dir == working_dir: # user did not supply dir
        args.working_dir = Path.home().joinpath(".vw_fb_runtests_working_dir")

    test_base_working_dir = str(args.working_dir)
    test_base_ref_dir = str(args.ref_dir)

    color_enum = NoColor if args.no_color else Color

    # Flatten nested lists for arg.test argument.
    # Ideally we would have used action="extend", but that was added in 3.8
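    # e.g. "-t 1 2 -t 3" produces args.test == [[1, 2], [3]], which the line
    # below flattens to [1, 2, 3].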
    if args.test is not None:
        args.test = [item for sublist in args.test for item in sublist]

    if Path(test_base_working_dir).is_file():
        print("--working_dir='{}' cannot be a file".format((test_base_working_dir)))
        sys.exit(1)

    if not Path(test_base_working_dir).exists():
        Path(test_base_working_dir).mkdir(parents=True, exist_ok=True)

    if not Path(test_base_ref_dir).exists():
        print("--ref_dir='{}' doesn't exist".format((test_base_ref_dir)))
        sys.exit(1)

    if args.clean_dirty:
        clean_dirty(test_base_ref_dir)

    if not args.ignore_dirty:
        do_dirty_check(test_base_ref_dir)

    print("Testing on: hostname={}, OS={}, num_jobs={}".format(
        (socket.gethostname()), (sys.platform), (args.jobs)))

    vw_bin = find_vw_binary(test_base_ref_dir, args.vw_bin_path)
    print("Using VW binary: {}".format((vw_bin)))

    spanning_tree_bin = find_spanning_tree_binary(
        test_base_ref_dir, args.spanning_tree_bin_path)
    print("Using spanning tree binary: {}".format((spanning_tree_bin)))

    if args.test_spec is None:
        runtests_file = find_runtests_file(test_base_ref_dir)
        tests = runtests_parser.file_to_obj(runtests_file)
        tests = [x.__dict__ for x in tests]
        print("Tests parsed from RunTests file: {}".format((runtests_file)))
    else:
        with open(args.test_spec) as json_test_spec_file:
            json_test_spec_content = json_test_spec_file.read()
        tests = json.loads(json_test_spec_content)
        print("Tests read from test spec file: {}".format((args.test_spec)))

    print()

    if args.for_flatbuffers:
        to_flatbuff = find_to_flatbuf_binary(test_base_ref_dir, args.to_flatbuff_path)
        tests = convert_tests_for_flatbuffers(tests, to_flatbuff, args.working_dir, color_enum)

    # Because bash_command based tests don't specify all inputs and outputs they must operate in the test directory directly.
    # This means that if they run in parallel they can break each other by touching the same files.
    # Until we can move to a test spec which allows us to specify the input/output we need to add dependencies between them here.
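    # For example, if tests 5, 9 and 12 are the bash_command tests, the loop
    # below sets tests[8]["depends_on"] = [5] and tests[11]["depends_on"] = [9],
    # so the shell-based tests execute one after another instead of in parallel.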
    prev_bash_test = None
    for test in tests:
        test_number = test["id"]
        if "bash_command" in test:
            if prev_bash_test is not None:
                if "depends_on" not in tests[test_number - 1]:
                    tests[test_number - 1]["depends_on"] = []
                tests[test_number - 1]["depends_on"].append(prev_bash_test)
            prev_bash_test = test_number

    tasks = []
    completed_tests = Completion()
    tests_to_run_explicitly = None
    if args.test is not None:
        tests_to_run_explicitly = calculate_test_to_run_explicitly(args.test, tests)
        print("Running tests: {}".format((list(tests_to_run_explicitly))))
        if len(args.test) != len(tests_to_run_explicitly):
            print(
                "Note: due to test dependencies, more than just tests {} must be run".format((args.test)))

    executor = ThreadPoolExecutor(max_workers=args.jobs)
    for test in tests:
        test_number = test["id"]
        if tests_to_run_explicitly is not None and test_number not in tests_to_run_explicitly:
            continue

        dependencies = None
        if "depends_on" in test:
            dependencies = test["depends_on"]

        input_files = []
        if "input_files" in test:
            input_files = test["input_files"]

        is_shell = False
        if "bash_command" in test:
            if sys.platform == "win32":
                print(
                    "Skipping test number '{}' as bash_command is unsupported on Windows.".format((test_number)))
                continue
            command_line = test['bash_command'].format(
                VW=vw_bin, SPANNING_TREE=spanning_tree_bin)
            is_shell = True
        elif "vw_command" in test:
            command_line = "{} {}".format((vw_bin), (test['vw_command']))
            if not args.include_flatbuffers and not args.for_flatbuffers:
                if '--flatbuffer' in test['vw_command']:
                    print("{} is a flatbuffer test, can be run with --include_flatbuffers flag, Skipping...".format(test_number))
                    continue
        else:
            print("{} is an unknown type. Skipping...".format((test_number)))
            continue

        tasks.append(executor.submit(run_command_line_test,
                                     test_number,
                                     command_line,
                                     test["diff_files"],
                                     overwrite=args.overwrite,
                                     epsilon=args.epsilon,
                                     is_shell=is_shell,
                                     input_files=input_files,
                                     base_working_dir=test_base_working_dir,
                                     ref_dir=test_base_ref_dir,
                                     completed_tests=completed_tests,
                                     dependencies=dependencies,
                                     fuzzy_compare=args.fuzzy_compare))

    num_success = 0
    num_fail = 0
    num_skip = 0
    while len(tasks) > 0:
        try:
            test_number, result = tasks[0].result()
        except Exception:
            print("----------------")
            traceback.print_exc()
            num_fail += 1
            print("----------------")
            if args.exit_first_fail:
                for task in tasks:
                    task.cancel()
                sys.exit(1)
            continue
        finally:
            tasks.pop(0)

        success_text = "{}Success{}".format(
            (color_enum.LIGHT_GREEN), (color_enum.ENDC))
        fail_text = "{}Fail{}".format(
            (color_enum.LIGHT_RED), (color_enum.ENDC))
        skipped_text = "{}Skip{}".format(
            (color_enum.LIGHT_CYAN), (color_enum.ENDC))
        num_success += result['result'] == Result.SUCCESS
        num_fail += result['result'] == Result.FAIL
        num_skip += result['result'] == Result.SKIPPED

        if result['result'] == Result.SUCCESS:
            result_text = success_text
        elif result['result'] == Result.FAIL:
            result_text = fail_text
        else:
            result_text = skipped_text

        print("Test {}: {}".format((test_number), (result_text)))
        if result['result'] != Result.SUCCESS:
            test = tests[test_number - 1]
            print("\tDescription: {}".format((test['desc'])))
            if 'vw_command' in test:
                print("\tvw_command: \"{}\"".format((test['vw_command'])))
            if 'bash_command' in test:
                print("\tbash_command: \"{}\"".format((test['bash_command'])))
        for name, check in result["checks"].items():
            # Don't print exit_code check as it is too much noise.
            if check['success'] and name == "exit_code":
                continue
            print(
                "\t[{}] {}: {}".format((name), (success_text if check['success'] else fail_text), (check['message'])))
            if not check['success']:
                if name == "exit_code":
                    print("---- stdout ----")
                    print(result["checks"]["exit_code"]["stdout"])
                    print("---- stderr ----")
                    print(result["checks"]["exit_code"]["stderr"])

                if "diff" in check:
                    print()
                    print_colored_diff(check["diff"], color_enum)
                    print()
                if args.exit_first_fail:
                    for task in tasks:
                        task.cancel()
                    sys.exit(1)
    print("-----")
    print("# Success: {}".format((num_success)))
    print("# Fail: {}".format((num_fail)))
    print("# Skip: {}".format((num_skip)))

    if num_fail > 0:
        sys.exit(1)
Example #2
def get_latest_tests():
    import runtests_parser as rtp
    tests = rtp.file_to_obj(rtp.find_runtest_file())
    return [x.__dict__ for x in tests]
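
A minimal usage sketch for the helper above (the __main__ guard and the printed summary are illustrative assumptions, not part of the original module):

if __name__ == "__main__":
    # Illustrative only: assumes runtests_parser can locate the RunTests file
    # from the current working directory.
    latest_tests = get_latest_tests()
    print("Parsed {} test definitions".format(len(latest_tests)))
    # Each entry is a plain dict, e.g. latest_tests[0]["id"] and latest_tests[0]["desc"].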
Example #3
# Same standard-library imports as Example #1, plus typing.Optional; this revision
# additionally assumes project-local helpers such as convert_to_test_data, get_test
# and is_valgrind_available from the surrounding module.
from typing import Optional


def main():
    working_dir = Path.home().joinpath(".vw_runtests_working_dir")
    test_ref_dir = Path(os.path.dirname(os.path.abspath(__file__)))

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "-t",
        "--test",
        type=int,
        action="append",
        nargs="+",
        help="Run specific tests and ignore all others",
    )
    parser.add_argument(
        "-E",
        "--epsilon",
        type=float,
        default=1e-4,
        help="Tolerance used when comparing floats. Only used if --fuzzy_compare is also supplied",
    )
    parser.add_argument(
        "-e",
        "--exit_first_fail",
        action="store_true",
        help="If supplied, will exit after the first failure",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="If test output differs from the reference file, overwrite the contents",
    )
    parser.add_argument(
        "-f",
        "--fuzzy_compare",
        action="store_true",
        help="Allow for some tolerance when comparing floats",
    )
    parser.add_argument(
        "--ignore_dirty",
        action="store_true",
        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to skip this check.",
    )
    parser.add_argument(
        "--clean_dirty",
        action="store_true",
        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to remove those files.",
    )
    parser.add_argument(
        "--working_dir", default=working_dir, help="Directory to save test outputs to"
    )
    parser.add_argument(
        "--ref_dir",
        default=test_ref_dir,
        help="Directory to read test input files from",
    )
    parser.add_argument(
        "-j", "--jobs", type=int, default=4, help="Number of tests to run in parallel"
    )
    parser.add_argument(
        "--vw_bin_path",
        help="Specify VW binary to use. Otherwise, binary will be searched for in build directory",
    )
    parser.add_argument(
        "--spanning_tree_bin_path",
        help="Specify spanning tree binary to use. Otherwise, binary will be searched for in build directory",
    )
    parser.add_argument(
        "--skip_spanning_tree_tests",
        help="Skip tests that use spanning tree",
        action="store_true",
    )
    parser.add_argument(
        "--test_spec",
        type=str,
        help="Optional. If passed the given JSON test spec will be used, "
        + "otherwise a test spec will be autogenerated from the RunTests test definitions",
    )
    parser.add_argument(
        "--no_color", action="store_true", help="Don't print color ANSI escape codes"
    )
    parser.add_argument(
        "--for_flatbuffers",
        action="store_true",
        help="Transform all of the test inputs into flatbuffer format and run tests",
    )
    parser.add_argument(
        "--to_flatbuff_path",
        help="Specify to_flatbuff binary to use. Otherwise, binary will be searched for in build directory",
    )
    parser.add_argument(
        "--include_flatbuffers",
        action="store_true",
        help="Don't skip the explicit flatbuffer tests from default run_tests run",
    )
    parser.add_argument(
        "--valgrind", action="store_true", help="Run tests with Valgrind"
    )
    args = parser.parse_args()

    if (
        args.for_flatbuffers and args.working_dir == working_dir
    ):  # user did not supply dir
        args.working_dir = Path.home().joinpath(".vw_fb_runtests_working_dir")

    test_base_working_dir = str(args.working_dir)
    test_base_ref_dir = str(args.ref_dir)

    color_enum = NoColor if args.no_color else Color

    if args.valgrind and not is_valgrind_available():
        print("Can't find valgrind")
        sys.exit(1)

    # Flatten nested lists for arg.test argument.
    # Ideally we would have used action="extend", but that was added in 3.8
    if args.test is not None:
        args.test = [item for sublist in args.test for item in sublist]

    if Path(test_base_working_dir).is_file():
        print("--working_dir='{}' cannot be a file".format((test_base_working_dir)))
        sys.exit(1)

    if not Path(test_base_working_dir).exists():
        Path(test_base_working_dir).mkdir(parents=True, exist_ok=True)

    if not Path(test_base_ref_dir).exists():
        print("--ref_dir='{}' doesn't exist".format((test_base_ref_dir)))
        sys.exit(1)

    if args.clean_dirty:
        clean_dirty(test_base_ref_dir)

    if not args.ignore_dirty:
        do_dirty_check(test_base_ref_dir)

    print(
        "Testing on: hostname={}, OS={}, num_jobs={}".format(
            (socket.gethostname()), (sys.platform), (args.jobs)
        )
    )

    vw_bin = find_vw_binary(test_base_ref_dir, args.vw_bin_path)
    print("Using VW binary: {}".format((vw_bin)))

    spanning_tree_bin: Optional[str] = None
    if not args.skip_spanning_tree_tests:
        spanning_tree_bin = find_spanning_tree_binary(
            test_base_ref_dir, args.spanning_tree_bin_path
        )
        print("Using spanning tree binary: {}".format((spanning_tree_bin)))

    if args.test_spec is None:
        runtests_file = find_runtests_file(test_base_ref_dir)
        tests = runtests_parser.file_to_obj(runtests_file)
        tests = [x.__dict__ for x in tests]
        print("Tests parsed from RunTests file: {}".format((runtests_file)))
    else:
        with open(args.test_spec) as json_test_spec_file:
            json_test_spec_content = json_test_spec_file.read()
        tests = json.loads(json_test_spec_content)
        print("Tests read from test spec file: {}".format((args.test_spec)))

    tests = convert_to_test_data(tests, vw_bin, spanning_tree_bin)

    print()

    # Filter the test list if the requested tests were explicitly specified
    tests_to_run_explicitly = None
    if args.test is not None:
        tests_to_run_explicitly = calculate_test_to_run_explicitly(args.test, tests)
        print("Running tests: {}".format((list(tests_to_run_explicitly))))
        if len(args.test) != len(tests_to_run_explicitly):
            print(
                "Note: due to test dependencies, more than just tests {} must be run".format(
                    (args.test)
                )
            )
        tests = list(filter(lambda x: x.id in tests_to_run_explicitly, tests))

    # Filter out flatbuffer tests if not specified
    if not args.include_flatbuffers and not args.for_flatbuffers:
        for test in tests:
            if "--flatbuffer" in test.command_line:
                test.skip = True
                test.skip_reason = "This is a flatbuffer test; run with the --include_flatbuffers flag to include it"

    if args.for_flatbuffers:
        to_flatbuff = find_to_flatbuf_binary(test_base_ref_dir, args.to_flatbuff_path)
        tests = convert_tests_for_flatbuffers(
            tests, to_flatbuff, args.working_dir, color_enum
        )

    # Because bash_command based tests don't specify all inputs and outputs they must operate in the test directory directly.
    # This means that if they run in parallel they can break each other by touching the same files.
    # Until we can move to a test spec which allows us to specify the input/output we need to add dependencies between them here.
    prev_bash_test = None
    for test in tests:
        if test.is_shell:
            if prev_bash_test is not None:
                test.depends_on.append(prev_bash_test.id)
            prev_bash_test = test

    tasks = []
    completed_tests = Completion()

    executor = ThreadPoolExecutor(max_workers=args.jobs)

    for test in tests:
        tasks.append(
            executor.submit(
                run_command_line_test,
                test,
                overwrite=args.overwrite,
                epsilon=args.epsilon,
                base_working_dir=test_base_working_dir,
                ref_dir=test_base_ref_dir,
                completed_tests=completed_tests,
                fuzzy_compare=args.fuzzy_compare,
                valgrind=args.valgrind,
            )
        )

    num_success = 0
    num_fail = 0
    num_skip = 0
    while len(tasks) > 0:
        try:
            test_number, result = tasks[0].result()
        except Exception:
            print("----------------")
            traceback.print_exc()
            num_fail += 1
            print("----------------")
            if args.exit_first_fail:
                for task in tasks:
                    task.cancel()
                sys.exit(1)
            continue
        finally:
            tasks.pop(0)

        success_text = "{}Success{}".format((color_enum.LIGHT_GREEN), (color_enum.ENDC))
        fail_text = "{}Fail{}".format((color_enum.LIGHT_RED), (color_enum.ENDC))
        skipped_text = "{}Skip{}".format((color_enum.LIGHT_CYAN), (color_enum.ENDC))
        num_success += result["result"] == Result.SUCCESS
        num_fail += result["result"] == Result.FAIL
        num_skip += result["result"] == Result.SKIPPED

        if result["result"] == Result.SUCCESS:
            result_text = success_text
        elif result["result"] == Result.FAIL:
            result_text = fail_text
        elif result["result"] == Result.SKIPPED:
            result_text = skipped_text + " ({})".format(
                result["skip_reason"]
                if result["skip_reason"] is not None
                else "unknown reason"
            )

        print("Test {}: {}".format((test_number), (result_text)))
        if result["result"] != Result.SUCCESS:
            test = get_test(test_number, tests)
            # Since this test produced a result - it must be in the tests list
            assert test is not None
            print("\tDescription: {}".format(test.description))
            print(
                '\t{}_command: "{}"'.format(
                    "bash" if test.is_shell else "vw", test.command_line
                )
            )
        for name, check in result["checks"].items():
            # Don't print exit_code check as it is too much noise.
            if check["success"] and name == "exit_code":
                continue
            print(
                "\t[{}] {}: {}".format(
                    name,
                    success_text if check["success"] else fail_text,
                    check["message"],
                )
            )
            if not check["success"]:
                if name == "exit_code":
                    print("---- stdout ----")
                    print(result["checks"]["exit_code"]["stdout"])
                    print("---- stderr ----")
                    print(result["checks"]["exit_code"]["stderr"])

                if "diff" in check:
                    print()
                    print_colored_diff(check["diff"], color_enum)
                    print()
                if args.exit_first_fail:
                    for task in tasks:
                        task.cancel()
                    sys.exit(1)
    print("-----")
    print("# Success: {}".format(num_success))
    print("# Fail: {}".format(num_fail))
    print("# Skip: {}".format(num_skip))

    if num_fail > 0:
        sys.exit(1)
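
Both main() variants above are presumably executed as a script; a minimal entry point (assumed here, not shown in the original snippets) would be:

if __name__ == "__main__":
    # Assumed entry point; the original examples start at main() itself.
    main()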