Example #1
def main():
    options = parse_args()
    test_directory = os.path.dirname(os.path.abspath(__file__))
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(['coverage', 'erase'])

    if options.jit:
        selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)

    if options.determine_from is not None and os.path.exists(options.determine_from):
        with open(options.determine_from, 'r') as fh:
            touched_files = [
                os.path.normpath(name.strip()) for name in fh.read().split('\n')
                if len(name.strip()) > 0
            ]
        # HACK: Ensure the 'test' paths can be traversed by Modulefinder
        sys.path.append('test')
        selected_tests = [
            test for test in selected_tests
            if determine_target(test, touched_files, options)
        ]
        sys.path.remove('test')

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage
            test_dir = os.path.dirname(os.path.abspath(__file__))
            with set_cwd(test_dir):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
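
Note: set_cwd is used in the coverage block above (and in the later examples) but is not defined in any of these excerpts. A minimal sketch of what it is assumed to be, a context manager that temporarily changes the working directory and restores it on exit (the real helper may differ):

import contextlib
import os

@contextlib.contextmanager
def set_cwd(path):
    # Temporarily switch the working directory, restoring the old one on exit.
    old_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_cwd)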
Example #2
    def test_run_mypy(self):
        """
        Runs mypy over all files specified in mypy.ini.
        Note that mypy.ini is not shipped in an installed version of PyTorch,
        so this test will only run mypy in a development setup or in CI.
        """
        def is_torch_mypyini(path_to_file):
            with open(path_to_file, 'r') as f:
                first_line = f.readline()

            if first_line.startswith('# This is the PyTorch MyPy config file'):
                return True

            return False

        test_dir = os.path.dirname(os.path.realpath(__file__))
        repo_rootdir = os.path.join(test_dir, '..')
        mypy_inifile = os.path.join(repo_rootdir, 'mypy.ini')
        if not (os.path.exists(mypy_inifile) and is_torch_mypyini(mypy_inifile)):
            self.skipTest("Can't find PyTorch MyPy config file")

        import numpy
        if numpy.__version__ == '1.20.0.dev0+7af1024':
            self.skipTest("Typeannotations in numpy-1.20.0-dev are broken")

        # TODO: Would be better not to chdir here, this affects the entire
        # process!
        with set_cwd(repo_rootdir):
            (stdout, stderr, result) = mypy.api.run([])

        if result != 0:
            self.fail(f"mypy failed: {stdout} {stderr}")
Example #3
    def test_doc_examples(self):
        """
        Run documentation examples through mypy.
        """
        fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'
        with open(fn, "w") as f:
            print(get_all_examples(), file=f)

        # OK, so here's the deal.  mypy treats installed packages
        # and local modules differently: if a package is installed,
        # mypy will refuse to use modules from that package for type
        # checking unless the module explicitly says that it supports
        # type checking. (Reference:
        # https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
        # )
        #
        # Now, PyTorch doesn't support typechecking, and we shouldn't
        # claim that it supports typechecking (it doesn't). However, not
        # claiming we support typechecking is bad for this test, which
        # wants to use the partial information we get from the bits of
        # PyTorch which are typed to check if it typechecks.  And
        # although mypy will work directly if you are working in source,
        # some of our tests involve installing PyTorch and then running
        # its tests.
        #
        # The guidance we got from Michael Sullivan and Joshua Oreman
        # (and that Thomas Viehmann arrived at independently)
        # is that we should create a fake directory and add symlinks for
        # the packages that should typecheck.  So that is what we do
        # here.
        #
        # If you want to run mypy by hand, and you run from PyTorch
        # root directory, it should work fine to skip this step (since
        # mypy will preferentially pick up the local files first).  The
        # temporary directory here is purely needed for CI.  For this
        # reason, we also still drop the generated file in the test
        # source folder, for ease of inspection when there are failures.
        with tempfile.TemporaryDirectory() as tmp_dir:
            try:
                os.symlink(
                    os.path.dirname(torch.__file__),
                    os.path.join(tmp_dir, 'torch'),
                    target_is_directory=True
                )
            except OSError:
                raise unittest.SkipTest('cannot symlink') from None
            repo_rootdir = Path(__file__).resolve().parent.parent
            # TODO: Would be better not to chdir here, this affects the
            # entire process!
            with set_cwd(str(repo_rootdir)):
                (stdout, stderr, result) = mypy.api.run([
                    '--cache-dir=.mypy_cache/doc',
                    '--no-strict-optional',  # needed because of torch.lu_unpack, see gh-36584
                    str(fn),
                ])
            if result != 0:
                self.fail(f"mypy failed:\n{stderr}\n{stdout}")
Example #4
def main():
    options = parse_args()

    test_directory = str(REPO_ROOT / "test")
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr("Selected tests:\n {}".format(
            "\n ".join(selected_tests)))

    if options.dry_run:
        return

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(["coverage", "erase"])

    if IS_CI:
        selected_tests = get_reordered_tests(selected_tests)
        # download the test case configuration to the local environment
        get_test_case_configs(dirpath=test_directory)

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage

            with set_cwd(test_directory):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
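
The loop in each main() variant relies on a contract for run_test_module that is implied but never shown: it returns None when the test module passes and an error-message string when it fails. A rough, purely illustrative sketch of a helper honoring that contract (it reuses the shell helper from these excerpts and assumes shell returns the subprocess exit code):

import os
import sys

def run_test_module(test, test_directory, options):
    # Illustrative only: launch the test module as a subprocess and translate
    # its exit code into the None-on-success / message-on-failure convention
    # that the callers above expect.
    return_code = shell([sys.executable, os.path.join(test_directory, test + '.py')])
    if return_code == 0:
        return None
    return '{} failed with exit code {}'.format(test, return_code)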
Example #5
    def test_run_mypy(self):
        """
        Runs mypy over all files specified in our mypy configs.
        Note that our mypy configs are not shipped in an installed
        version of PyTorch, so this test will only run mypy in a
        development setup or in CI.
        """
        def is_torch_mypyini(path_to_file):
            with open(path_to_file, 'r') as f:
                first_line = f.readline()

            name = os.path.basename(path_to_file)
            if first_line.startswith(f'# This is the PyTorch {name} file'):
                return True

            return False

        # to add more configs, edit the implementation of the
        # config_files function rather than editing this test or adding
        # more tests to this suite
        for ini in config_files():
            with self.subTest(msg=ini):
                test_dir = os.path.dirname(os.path.realpath(__file__))
                repo_rootdir = os.path.join(test_dir, '..')
                mypy_inifile = os.path.join(repo_rootdir, ini)
                if not (os.path.exists(mypy_inifile)
                        and is_torch_mypyini(mypy_inifile)):
                    self.skipTest("Can't find PyTorch MyPy config file")

                import numpy
                # Mypy thinks that NumPy has no attribute "__version__" even
                # though it does (fixed in NumPy v1.20.1).
                if numpy.__version__.startswith(
                        '1.20.0.dev0'):  # type: ignore[attr-defined]
                    self.skipTest(
                        "Typeannotations in numpy-1.20.0-dev are broken")

                # TODO: Would be better not to chdir here, this affects
                # the entire process!
                with set_cwd(repo_rootdir):
                    (stdout, stderr, result) = mypy.api.run([
                        '--config',
                        mypy_inifile,
                    ])

                if result != 0:
                    self.fail(f"mypy failed: {stdout} {stderr}")
Example #6
    def test_run_mypy_strict(self):
        """
        Runs mypy over all files specified in mypy-strict.ini
        """
        test_dir = os.path.dirname(os.path.realpath(__file__))
        repo_rootdir = os.path.join(test_dir, '..')
        mypy_inifile = os.path.join(repo_rootdir, 'mypy-strict.ini')
        if not os.path.exists(mypy_inifile):
            self.skipTest("Can't find PyTorch MyPy strict config file")

        with set_cwd(repo_rootdir):
            (stdout, stderr, result) = mypy.api.run([
                '--config', mypy_inifile,
            ])

        if result != 0:
            self.fail(f"mypy failed: {stdout} {stderr}")
Example #7
def main():
    options = parse_args()

    # TODO: move this export & download function into the tools/ folder
    test_times_filename = options.export_past_test_times
    if test_times_filename:
        print(
            f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.'
        )
        export_S3_test_times(test_times_filename)
        return

    specified_test_cases_filename = options.run_specified_test_cases
    if specified_test_cases_filename:
        print(
            f'Loading specified test cases to run from {specified_test_cases_filename}.'
        )
        global SPECIFIED_TEST_CASES_DICT
        SPECIFIED_TEST_CASES_DICT = get_specified_test_cases(
            specified_test_cases_filename, TESTS)

    test_directory = os.path.dirname(os.path.abspath(__file__))
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(['coverage', 'erase'])

    if options.jit:
        selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)

    if options.determine_from is not None and os.path.exists(
            options.determine_from):
        slow_tests = get_slow_tests_based_on_S3(TESTS, TARGET_DET_LIST,
                                                SLOW_TEST_THRESHOLD)
        print(
            'Added the following tests to target_det tests as calculated based on S3:'
        )
        print(slow_tests)
        with open(options.determine_from, 'r') as fh:
            touched_files = [
                os.path.normpath(name.strip())
                for name in fh.read().split('\n') if len(name.strip()) > 0
            ]
        # HACK: Ensure the 'test' paths can be traversed by Modulefinder
        sys.path.append('test')
        selected_tests = [
            test for test in selected_tests
            if determine_target(TARGET_DET_LIST +
                                slow_tests, test, touched_files, options)
        ]
        sys.path.remove('test')

    if IS_IN_CI:
        selected_tests = get_reordered_tests(selected_tests,
                                             ENABLE_PR_HISTORY_REORDERING)
        # download the test case configuration to the local environment
        get_test_case_configs(
            dirpath=os.path.dirname(os.path.abspath(__file__)))

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage
            test_dir = os.path.dirname(os.path.abspath(__file__))
            with set_cwd(test_dir):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
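
determine_target is not shown either. The "HACK" comment about Modulefinder suggests the idea: trace the imports of each test module and keep the test only if it is on the explicit target list or its import closure includes one of the touched files. A simplified, hypothetical sketch of that idea (not the actual implementation):

import modulefinder
import os

def determine_target(target_det_list, test, touched_files, options):
    # Hypothetical sketch: always keep tests on the explicit target list.
    if test in target_det_list:
        return True
    # Otherwise keep the test only if a changed file shows up among the files
    # the test module (transitively) imports. In practice the two sets of
    # paths would need to be normalized to a common repository root.
    finder = modulefinder.ModuleFinder()
    finder.run_script(os.path.join('test', test + '.py'))
    imported_files = {
        os.path.normpath(mod.__file__)
        for mod in finder.modules.values()
        if getattr(mod, '__file__', None)
    }
    return any(os.path.normpath(f) in imported_files for f in touched_files)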
Example #8
def main():
    options = parse_args()

    # TODO: move this export & download function into the tools/ folder
    test_times_filename = options.export_past_test_times
    if test_times_filename:
        print(
            f"Exporting past test times from S3 to {test_times_filename}, no tests will be run."
        )
        export_S3_test_times(test_times_filename)
        return

    specified_test_cases_filename = options.run_specified_test_cases
    if specified_test_cases_filename:
        print(
            f"Loading specified test cases to run from {specified_test_cases_filename}."
        )
        global SPECIFIED_TEST_CASES_DICT
        SPECIFIED_TEST_CASES_DICT = get_specified_test_cases(
            specified_test_cases_filename, TESTS
        )

    test_directory = str(REPO_ROOT / "test")
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr("Selected tests:\n {}".format("\n ".join(selected_tests)))

    if options.dry_run:
        return

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(["coverage", "erase"])

    # NS: Disable target determination until it can be made more reliable
    # if options.determine_from is not None and os.path.exists(options.determine_from):
    #     slow_tests = get_slow_tests_based_on_S3(
    #         TESTS, TARGET_DET_LIST, SLOW_TEST_THRESHOLD
    #     )
    #     print_to_stderr(
    #         "Added the following tests to target_det tests as calculated based on S3:"
    #     )
    #     print_to_stderr(slow_tests)
    #     with open(options.determine_from, "r") as fh:
    #         touched_files = [
    #             os.path.normpath(name.strip())
    #             for name in fh.read().split("\n")
    #             if len(name.strip()) > 0
    #         ]
    #     # HACK: Ensure the 'test' paths can be traversed by Modulefinder
    #     sys.path.append(test_directory)
    #     selected_tests = [
    #         test
    #         for test in selected_tests
    #         if should_run_test(
    #             TARGET_DET_LIST + slow_tests, test, touched_files, options
    #         )
    #     ]
    #     sys.path.remove(test_directory)

    if IS_IN_CI:
        selected_tests = get_reordered_tests(
            selected_tests, ENABLE_PR_HISTORY_REORDERING
        )
        # download the test case configuration to the local environment
        get_test_case_configs(dirpath=test_directory)

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage

            with set_cwd(test_directory):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
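
Every main() variant above also gates its coverage handling on a module-level PYTORCH_COLLECT_COVERAGE flag that is never defined in these excerpts. A plausible definition, assuming it simply mirrors an environment variable of the same name:

import os

# Assumed: when set, this run only aggregates coverage data for a later job;
# when unset, coverage is erased up front and rendered to an HTML report here.
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get('PYTORCH_COLLECT_COVERAGE'))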