Example 1
 def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
     incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
     expected_shards = [
         (22.0, ['long_test1', 'long_test2', 'normal_test3', 'short_test3', 'short_test5']),
         (10.0, ['normal_test1', 'short_test1', 'super_long_test', 'normal_test2', 'short_test2', 'short_test4']),
     ]
     self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, incomplete_test_times))
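The test methods in these examples rely on fixtures (self.tests and self.test_times) that are not shown. The values below are a sketch reverse-engineered from the expected shard totals; the individual times of short_test2 through short_test5 are assumptions, since the examples only pin down their combined total of 1.31.

# Hypothetical fixture values, inferred from the expected shard totals.
# Only the sums are implied by the examples; the split across
# short_test2..short_test5 (totalling 1.31) is assumed.
tests = [
    "super_long_test",
    "long_test1", "long_test2",
    "normal_test1", "normal_test2", "normal_test3",
    "short_test1", "short_test2", "short_test3", "short_test4", "short_test5",
]
test_times = {
    "super_long_test": 55.0,
    "long_test1": 22.0,
    "long_test2": 18.0,
    "normal_test1": 9.0,
    "normal_test2": 7.0,
    "normal_test3": 5.0,
    "short_test1": 1.0,
    "short_test2": 0.6,
    "short_test3": 0.4,
    "short_test4": 0.3,
    "short_test5": 0.01,
}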
Example 2
 def test_calculate_2_shards_with_complete_test_times(self) -> None:
     expected_shards = [
         (60, ['super_long_test', 'normal_test3']),
         (58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
                  'short_test3', 'short_test4', 'short_test5'])
     ]
     self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, self.test_times))
Example 3
 def test_calculate_5_shards_with_complete_test_times(self) -> None:
     expected_shards = [
         (55.0, ["super_long_test"]),
         (
             22.0,
             [
                 "long_test1",
             ],
         ),
         (
             18.0,
             [
                 "long_test2",
             ],
         ),
         (
             11.31,
             [
                 "normal_test1",
                 "short_test1",
                 "short_test2",
                 "short_test3",
                 "short_test4",
                 "short_test5",
             ],
         ),
         (12.0, ["normal_test2", "normal_test3"]),
     ]
     self.assert_shards_equal(
         expected_shards, calculate_shards(5, self.tests, self.test_times)
     )
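A minimal, hypothetical sketch of a calculate_shards that is consistent with the expected outputs in Examples 1-3: tests with recorded times are placed greedily, longest first, onto the currently lightest shard, and tests with no recorded time are then spread round-robin across the shards starting from the lightest. The actual implementation may order shards or break ties differently.

from typing import Dict, List, Tuple


def calculate_shards(
    num_shards: int,
    tests: List[str],
    test_times: Dict[str, float],
) -> List[Tuple[float, List[str]]]:
    known = [t for t in tests if t in test_times]
    unknown = [t for t in tests if t not in test_times]

    # Each shard is a mutable [total_time, test_list] pair.
    shards: List[list] = [[0.0, []] for _ in range(num_shards)]

    # Longest-processing-time-first greedy assignment for timed tests.
    for test in sorted(known, key=lambda t: test_times[t], reverse=True):
        lightest = min(shards, key=lambda s: s[0])
        lightest[0] += test_times[test]
        lightest[1].append(test)

    # Untimed tests contribute nothing to the totals; spread them
    # round-robin, starting from the lightest shard.
    by_load = sorted(range(num_shards), key=lambda i: shards[i][0])
    for i, test in enumerate(unknown):
        shards[by_load[i % num_shards]][1].append(test)

    return [(total, shard_tests) for total, shard_tests in shards]

With the fixture values sketched after Example 1, this greedy version reproduces the expected shards of Examples 1, 2, and 3.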
Example 4
 def test_calculate_2_shards_against_optimal_shards(self) -> None:
     random.seed(120)  # seed once so the 100 trials draw different random times
     for _ in range(100):
         random_times = {k: random.random() * 10 for k in self.tests}
         # test times for everything except super_long_test and long_test1
         rest_of_tests = [
             i
             for k, i in random_times.items()
             if k != "super_long_test" and k != "long_test1"
         ]
         sum_of_rest = sum(rest_of_tests)
         random_times["super_long_test"] = max(sum_of_rest / 2, max(rest_of_tests))
         random_times["long_test1"] = sum_of_rest - random_times["super_long_test"]
         # An optimal sharding would look like the below, but we don't need to compute this for the test:
         # optimal_shards = [
         #     (sum_of_rest, ['super_long_test', 'long_test1']),
         #     (sum_of_rest, [i for i in self.tests if i != 'super_long_test' and i != 'long_test1']),
         # ]
         calculated_shards = calculate_shards(2, self.tests, random_times)
         max_shard_time = max(calculated_shards[0][0], calculated_shards[1][0])
         if sum_of_rest != 0:
             # The calculated shard should not have a ratio worse than 7/6 for num_shards = 2
             self.assertGreaterEqual(7.0 / 6.0, max_shard_time / sum_of_rest)
             sorted_tests = sorted(self.tests)
             sorted_shard_tests = sorted(
                 calculated_shards[0][1] + calculated_shards[1][1]
             )
             # All the tests should be represented by some shard
             self.assertEqual(sorted_tests, sorted_shard_tests)
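The 7/6 threshold in Example 4 is Graham's bound for longest-processing-time-first scheduling: with m machines, LPT produces a makespan at most (4/3 - 1/(3m)) times the optimum, which is 7/6 for m = 2. The test constructs the times so that super_long_test and long_test1 together equal the sum of all remaining times, so sum_of_rest is the optimal makespan and the assertion checks exactly that bound (assuming calculate_shards does a greedy longest-first assignment, as sketched above).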
Example 5
 def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
     incomplete_test_times = {
         k: v for k, v in self.test_times.items() if "test1" in k
     }
     expected_shards = [
         (
             22.0,
             [
                 "long_test1",
                 "long_test2",
                 "normal_test3",
                 "short_test3",
                 "short_test5",
             ],
         ),
         (
             10.0,
             [
                 "normal_test1",
                 "short_test1",
                 "super_long_test",
                 "normal_test2",
                 "short_test2",
                 "short_test4",
             ],
         ),
     ]
     self.assert_shards_equal(
         expected_shards, calculate_shards(2, self.tests, incomplete_test_times)
     )
Example 6
 def test_calculate_5_shards_with_complete_test_times(self) -> None:
     expected_shards = [
         (55.0, ['super_long_test']),
         (22.0, ['long_test1', ]),
         (18.0, ['long_test2', ]),
         (11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
         (12.0, ['normal_test2', 'normal_test3']),
     ]
     self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, self.test_times))
Example 7
 def test_calculate_2_shards_with_complete_test_times(self) -> None:
     expected_shards = [
         (60, ["super_long_test", "normal_test3"]),
         (
             58.31,
             [
                 "long_test1",
                 "long_test2",
                 "normal_test1",
                 "normal_test2",
                 "short_test1",
                 "short_test2",
                 "short_test3",
                 "short_test4",
                 "short_test5",
             ],
         ),
     ]
     self.assert_shards_equal(
         expected_shards, calculate_shards(2, self.tests, self.test_times))
Example 8
def get_selected_tests(options):
    selected_tests = options.include

    # filter down to JIT-only or distributed-only tests when those options are set
    if options.jit:
        selected_tests = list(
            filter(lambda test_name: "jit" in test_name, selected_tests))

    if options.distributed_tests:
        selected_tests = list(
            filter(lambda test_name: test_name in DISTRIBUTED_TESTS,
                   selected_tests))

    # Filter to only run core tests when --core option is specified
    if options.core:
        selected_tests = list(
            filter(lambda test_name: test_name in CORE_TEST_LIST,
                   selected_tests))

    # process reordering
    if options.bring_to_front:
        to_front = set(options.bring_to_front)
        selected_tests = options.bring_to_front + list(
            filter(lambda name: name not in to_front, selected_tests))

    if options.first:
        first_index = find_test_index(options.first, selected_tests)
        selected_tests = selected_tests[first_index:]

    if options.last:
        last_index = find_test_index(options.last,
                                     selected_tests,
                                     find_last_index=True)
        selected_tests = selected_tests[:last_index + 1]

    # process exclusion
    if options.exclude_jit_executor:
        options.exclude.extend(JIT_EXECUTOR_TESTS)

    if options.exclude_distributed_tests:
        options.exclude.extend(DISTRIBUTED_TESTS)

    # These tests fail with CUDA 11.6; disable them temporarily. Issue: https://github.com/pytorch/pytorch/issues/75375
    if torch.version.cuda is not None and LooseVersion(
            torch.version.cuda) == "11.6":
        options.exclude.extend(["distributions/test_constraints"])

    selected_tests = exclude_tests(options.exclude, selected_tests)

    if sys.platform == "win32" and not options.ignore_win_blocklist:
        target_arch = os.environ.get("VSCMD_ARG_TGT_ARCH")
        if target_arch != "x64":
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_no_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_jit")
            WINDOWS_BLOCKLIST.append("jit")
            WINDOWS_BLOCKLIST.append("jit_fuser")

        # This exception is caused by https://github.com/pytorch/pytorch/issues/69460
        # and the code below should be removed once that issue is resolved.
        if torch.version.cuda is not None and LooseVersion(
                torch.version.cuda) >= "11.5":
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_no_ninja")

        selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests,
                                       "on Windows")

    elif TEST_WITH_ROCM:
        selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests,
                                       "on ROCm")

    # sharding
    if options.shard:
        assert len(options.shard) == 2, "Unexpected shard format"
        assert min(options.shard) > 0, "Shards must be positive numbers"
        which_shard, num_shards = options.shard
        assert (
            which_shard <= num_shards
        ), "Selected shard must be less than or equal to total number of shards"
        assert num_shards <= len(
            selected_tests
        ), f"Number of shards must be less than {len(selected_tests)}"

        if num_shards == 1:
            return selected_tests

        # Download previous test times to make sharding decisions
        test_file_times = get_test_times(str(REPO_ROOT),
                                         filename=TEST_TIMES_FILE)
        if len(test_file_times) == 0:
            print(
                "::warning:: Gathered no stats from S3. Proceeding with default sharding plan."
            )
            selected_tests = selected_tests[which_shard - 1::num_shards]
        else:
            shards = calculate_shards(num_shards, selected_tests,
                                      test_file_times)
            _, tests_from_shard = shards[which_shard - 1]
            selected_tests = tests_from_shard

    # skip all distributed tests if distributed package is not available.
    if not dist.is_available():
        selected_tests = exclude_tests(
            DISTRIBUTED_TESTS, selected_tests,
            "PyTorch is built without distributed support.")

    # skip tests that require LAPACK when it's not available
    if not torch._C.has_lapack:
        selected_tests = exclude_tests(
            TESTS_REQUIRING_LAPACK, selected_tests,
            "PyTorch is built without LAPACK support.")

    return selected_tests
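get_selected_tests calls an exclude_tests helper that is not shown in this example. Below is a minimal, hypothetical sketch under the assumption that it removes matching test names and logs the reason; the real helper in run_test.py may match or log differently.

import sys
from typing import List, Optional


def exclude_tests(
    exclude_list: List[str],
    selected_tests: List[str],
    exclude_message: Optional[str] = None,
) -> List[str]:
    # Hypothetical helper: drop every selected test that matches an entry
    # in exclude_list, optionally logging why it was skipped.
    for exclude_test in exclude_list:
        # Iterate over a copy so entries can be removed from selected_tests.
        for test in selected_tests[:]:
            if test == exclude_test or test.startswith(exclude_test + "/"):
                if exclude_message is not None:
                    print(f"Excluding {test} {exclude_message}", file=sys.stderr)
                selected_tests.remove(test)
    return selected_tests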