def hotpatch_var(var, prefix='USE_'):
    """Translate legacy ``NO_<var>``/``WITH_<var>`` env flags onto ``<prefix><var>``.

    Historically the build accepted ``NO_FOO``/``WITH_FOO`` toggles; the build
    system now reads a single ``USE_FOO``-style variable.  This helper mirrors
    whichever legacy flag is set into ``os.environ[prefix + var]``.

    Args:
        var: Bare feature name, e.g. ``'CUDA'``.
        prefix: Target variable prefix.  Defaults to ``'USE_'`` so existing
            callers are unaffected; generalized so the same helper can patch
            other prefixes (matches the sibling variant elsewhere in this file).

    Notes:
        The first matching check wins (if/elif chain).  ``check_env_flag`` /
        ``check_negative_env_flag`` come from ``tools.setup_helpers.env``.
    """
    if check_env_flag('NO_' + var):
        # NO_<var> set truthy -> feature explicitly disabled.
        os.environ[prefix + var] = '0'
    elif check_negative_env_flag('NO_' + var):
        # NO_<var> set falsy -> feature explicitly enabled.
        os.environ[prefix + var] = '1'
    elif check_env_flag('WITH_' + var):
        os.environ[prefix + var] = '1'
    elif check_negative_env_flag('WITH_' + var):
        os.environ[prefix + var] = '0'
def hotpatch_var(var, prefix='USE_'):
    """Mirror legacy ``NO_<var>``/``WITH_<var>`` env flags into ``<prefix><var>``.

    Probes the legacy flags in a fixed priority order and writes the
    corresponding '0'/'1' value to ``os.environ[prefix + var]`` for the first
    flag that matches.  If none match, the environment is left untouched.
    """
    # (probe, legacy variable name, value to write) — order matters: the
    # first probe that fires wins, exactly like the original if/elif chain.
    legacy_checks = (
        (check_env_flag, 'NO_' + var, '0'),
        (check_negative_env_flag, 'NO_' + var, '1'),
        (check_env_flag, 'WITH_' + var, '1'),
        (check_negative_env_flag, 'WITH_' + var, '0'),
    )
    for probe, legacy_name, value in legacy_checks:
        if probe(legacy_name):
            os.environ[prefix + var] = value
            break
NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (USE_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import USE_NNPACK from tools.setup_helpers.qnnpack import USE_QNNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import USE_DISTRIBUTED, \ USE_GLOO_IBVERBS ################################################################################ # Parameters parsed from environment ################################################################################ DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH') USE_CUDA_STATIC_LINK = check_env_flag('USE_CUDA_STATIC_LINK') RERUN_CMAKE = True NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE") if not ONNX_NAMESPACE:
import importlib from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False
from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (WITH_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False
import glob from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False
import sys import os import json import glob from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import WITH_CUDNN, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.split_types import split_types from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') WITH_DISTRIBUTED = not check_env_flag('NO_DISTRIBUTED') and not IS_WINDOWS WITH_DISTRIBUTED_MW = WITH_DISTRIBUTED and check_env_flag('WITH_DISTRIBUTED_MW') try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False if not WITH_NINJA:
from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (WITH_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS, WITH_C10D ################################################################################ # Parameters parsed from environment ################################################################################ DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') WITH_ROCM = check_env_flag('WITH_ROCM') FULL_CAFFE2 = check_env_flag('FULL_CAFFE2') BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) # Ninja try:
import setuptools.command.install import setuptools.command.develop import setuptools.command.build_py import distutils.unixccompiler import distutils.command.build import distutils.command.clean import platform import subprocess import shutil import sys import os from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME from tools.setup_helpers.cudnn import WITH_CUDNN, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR DEBUG = check_env_flag('DEBUG') WITH_DISTRIBUTED = check_env_flag('WITH_DISTRIBUTED') WITH_DISTRIBUTED_MW = WITH_DISTRIBUTED and check_env_flag('WITH_DISTRIBUTED_MW') ################################################################################ # Monkey-patch setuptools to compile in parallel ################################################################################ original_link = distutils.unixccompiler.UnixCCompiler.link def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): # those lines are copied from distutils.ccompiler.CCompiler directly macros, objects, extra_postargs, pp_opts, build = self._setup_compile( output_dir, macros, include_dirs, sources, depends, extra_postargs)
from tools.setup_helpers.nccl import USE_NCCL, USE_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (USE_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import USE_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import USE_DISTRIBUTED, \ USE_DISTRIBUTED_MW, USE_GLOO_IBVERBS, USE_C10D ################################################################################ # Parameters parsed from environment ################################################################################ DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') FULL_CAFFE2 = check_env_flag('FULL_CAFFE2') BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE") if not ONNX_NAMESPACE: ONNX_NAMESPACE = "onnx_torch"
import os import json import glob from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import WITH_CUDNN, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.split_types import split_types from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') WITH_DISTRIBUTED = not check_env_flag('NO_DISTRIBUTED') and not IS_WINDOWS WITH_DISTRIBUTED_MW = WITH_DISTRIBUTED and check_env_flag('WITH_DISTRIBUTED_MW') try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False if not WITH_NINJA:
from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (WITH_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') # Check if ROCM is enabled WITH_ROCM = check_env_flag('WITH_ROCM') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) try: import ninja
from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.split_types import split_types from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') WITH_SCALARS = check_env_flag('WITH_SCALARS') try: import ninja WITH_NINJA = True except ImportError: WITH_NINJA = False if not WITH_NINJA:
# and matches the flags set for protobuf and ONNX extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB'] # /MD links against DLL runtime # and matches the flags set for protobuf and ONNX # /Z7 turns on symbolic debugging information in .obj files # /EHa is about native C++ catch support for asynchronous # structured exception handling (SEH) # /DNOMINMAX removes builtin min/max functions # /wdXXXX disables warning no. XXXX extra_compile_args = ['/MD', '/Z7', '/EHa', '/DNOMINMAX', '/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838', '/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996', '/wd4275'] if sys.version_info[0] == 2: if not check_env_flag('FORCE_PY27_BUILD'): report('The support for PyTorch with Python 2.7 on Windows is very experimental.') report('Please set the flag `FORCE_PY27_BUILD` to 1 to continue build.') sys.exit(1) # /bigobj increases number of sections in .obj file, which is needed to link # against libaries in Python 2.7 under Windows extra_compile_args.append('/bigobj') else: extra_link_args = [] extra_compile_args = [ '-std=c++11', '-Wall', '-Wextra', '-Wno-strict-overflow', '-Wno-unused-parameter', '-Wno-missing-field-initializers',
MIOPEN_LIB_DIR, MIOPEN_INCLUDE_DIR) from tools.setup_helpers.nccl import USE_NCCL, USE_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import USE_NNPACK from tools.setup_helpers.qnnpack import USE_QNNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import USE_DISTRIBUTED, \ USE_GLOO_IBVERBS ################################################################################ # Parameters parsed from environment ################################################################################ DEBUG = check_env_flag('DEBUG') REL_WITH_DEB_INFO = check_env_flag('REL_WITH_DEB_INFO') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') IS_PPC = (platform.machine() == 'ppc64le') IS_ARM = (platform.machine() == 'aarch64') BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH') # ppc64le and aarch64 do not support MKLDNN if IS_PPC or IS_ARM: USE_MKLDNN = check_env_flag('USE_MKLDNN', 'OFF') else: USE_MKLDNN = check_env_flag('USE_MKLDNN', 'ON') USE_CUDA_STATIC_LINK = check_env_flag('USE_CUDA_STATIC_LINK')
def configure_extension_build():
    r"""Configures extension build options according to system environment and user's choice.

    Returns:
      The input to parameters ext_modules, cmdclass, packages, and entry_points as required in setuptools.setup.
    """
    # NOTE(review): this function reads module-level globals (IS_WINDOWS,
    # IS_DARWIN, lib_path, cmake, build_type, build_ext, clean, install,
    # check_env_flag) that are defined elsewhere in the file.

    try:
        cmake_cache_vars = defaultdict(lambda: False, cmake.get_cmake_cache_variables())
    except FileNotFoundError:
        # CMakeCache.txt does not exist. Probably running "python setup.py clean" over a clean directory.
        cmake_cache_vars = defaultdict(lambda: False)

    ################################################################################
    # Configure compile flags
    ################################################################################

    library_dirs = []
    extra_install_requires = []

    if IS_WINDOWS:
        # /NODEFAULTLIB makes sure we only link to DLL runtime
        # and matches the flags set for protobuf and ONNX
        extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB']
        # /MD links against DLL runtime
        # and matches the flags set for protobuf and ONNX
        # /EHsc is about standard C++ exception handling
        # /DNOMINMAX removes builtin min/max functions
        # /wdXXXX disables warning no. XXXX
        # NOTE(review): '/wd4522' appears twice — harmless but redundant.
        extra_compile_args = ['/MD', '/EHsc', '/DNOMINMAX',
                              '/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838',
                              '/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996',
                              '/wd4275']
    else:
        extra_link_args = []
        extra_compile_args = [
            '-Wall',
            '-Wextra',
            '-Wno-strict-overflow',
            '-Wno-unused-parameter',
            '-Wno-missing-field-initializers',
            '-Wno-write-strings',
            '-Wno-unknown-pragmas',
            # This is required for Python 2 declarations that are deprecated in 3.
            '-Wno-deprecated-declarations',
            # Python 2.6 requires -fno-strict-aliasing, see
            # http://legacy.python.org/dev/peps/pep-3123/
            # We also depend on it in our code (even Python 3).
            '-fno-strict-aliasing',
            # Clang has an unfixed bug leading to spurious missing
            # braces warnings, see
            # https://bugs.llvm.org/show_bug.cgi?id=21629
            '-Wno-missing-braces',
        ]
        if check_env_flag('WERROR'):
            extra_compile_args.append('-Werror')

    library_dirs.append(lib_path)

    main_compile_args = []
    main_libraries = ['torch_python']
    main_link_args = []
    # The real implementation lives in libtorch_python; this stub just loads it.
    main_sources = ["torch/csrc/stub.c"]

    if cmake_cache_vars['USE_CUDA']:
        library_dirs.append(
            os.path.dirname(cmake_cache_vars['CUDA_CUDA_LIB']))

    if cmake_cache_vars['USE_NUMPY']:
        extra_install_requires += ['numpy']

    if build_type.is_debug():
        if IS_WINDOWS:
            extra_compile_args.append('/Z7')
            extra_link_args.append('/DEBUG:FULL')
        else:
            extra_compile_args += ['-O0', '-g']
            extra_link_args += ['-O0', '-g']

    if build_type.is_rel_with_deb_info():
        if IS_WINDOWS:
            extra_compile_args.append('/Z7')
            extra_link_args.append('/DEBUG:FULL')
        else:
            extra_compile_args += ['-g']
            extra_link_args += ['-g']

    def make_relative_rpath(path):
        # Per-platform rpath so the extension finds its bundled libs at runtime.
        if IS_DARWIN:
            return '-Wl,-rpath,@loader_path/' + path
        elif IS_WINDOWS:
            return ''
        else:
            return '-Wl,-rpath,$ORIGIN/' + path

    ################################################################################
    # Declare extensions and package
    ################################################################################

    extensions = []
    packages = find_packages(exclude=('tools', 'tools.*'))
    C = Extension("torch._C",
                  libraries=main_libraries,
                  sources=main_sources,
                  language='c',
                  extra_compile_args=main_compile_args + extra_compile_args,
                  include_dirs=[],
                  library_dirs=library_dirs,
                  extra_link_args=extra_link_args + main_link_args + [make_relative_rpath('lib')])
    extensions.append(C)
    if not IS_WINDOWS:
        DL = Extension("torch._dl",
                       sources=["torch/csrc/dl.c"],
                       language='c')
        extensions.append(DL)

    # These extensions are built by cmake and copied manually in build_extensions()
    # inside the build_ext implementation
    extensions.append(
        Extension(
            name=str('caffe2.python.caffe2_pybind11_state'),
            sources=[]),
    )
    if cmake_cache_vars['USE_CUDA']:
        extensions.append(
            Extension(
                name=str('caffe2.python.caffe2_pybind11_state_gpu'),
                sources=[]),
        )
    if cmake_cache_vars['USE_ROCM']:
        extensions.append(
            Extension(
                name=str('caffe2.python.caffe2_pybind11_state_hip'),
                sources=[]),
        )

    cmdclass = {
        'build_ext': build_ext,
        'clean': clean,
        'install': install,
    }
    entry_points = {
        'console_scripts': [
            'convert-caffe2-to-onnx = caffe2.python.onnx.bin.conversion:caffe2_to_onnx',
            'convert-onnx-to-caffe2 = caffe2.python.onnx.bin.conversion:onnx_to_caffe2',
        ]
    }

    return extensions, cmdclass, packages, entry_points, extra_install_requires
from tools.setup_helpers.env import check_env_flag from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR) from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.nnpack import WITH_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.split_types import split_types from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \ WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') if 'WITH_SCALARS' not in os.environ: os.environ['WITH_SCALARS'] = '1' WITH_SCALARS = check_env_flag('WITH_SCALARS') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) try:
def configure_extension_build():
    r"""Configures extension build options according to system environment and user's choice.

    Returns:
      The input to parameters ext_modules, cmdclass, packages, and entry_points as required in setuptools.setup.
    """
    # NOTE(review): this function reads module-level globals (IS_WINDOWS,
    # IS_DARWIN, lib_path, cmake, build_type, build_ext, clean, install,
    # check_env_flag, report) defined elsewhere in the file.

    try:
        cmake_cache_vars = defaultdict(lambda: False, cmake.get_cmake_cache_variables())
    except FileNotFoundError:
        # CMakeCache.txt does not exist. Probably running "python setup.py clean" over a clean directory.
        cmake_cache_vars = defaultdict(lambda: False)

    ################################################################################
    # Configure compile flags
    ################################################################################

    library_dirs = []

    if IS_WINDOWS:
        # /NODEFAULTLIB makes sure we only link to DLL runtime
        # and matches the flags set for protobuf and ONNX
        extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB']
        # /MD links against DLL runtime
        # and matches the flags set for protobuf and ONNX
        # /Z7 turns on symbolic debugging information in .obj files
        # /EHa is about native C++ catch support for asynchronous
        # structured exception handling (SEH)
        # /DNOMINMAX removes builtin min/max functions
        # /wdXXXX disables warning no. XXXX
        # NOTE(review): '/wd4522' appears twice — harmless but redundant.
        extra_compile_args = [
            '/MD', '/Z7', '/EHa', '/DNOMINMAX', '/wd4267', '/wd4251', '/wd4522',
            '/wd4522', '/wd4838', '/wd4305', '/wd4244', '/wd4190', '/wd4101',
            '/wd4996', '/wd4275'
        ]
        if sys.version_info[0] == 2:
            if not check_env_flag('FORCE_PY27_BUILD'):
                report(
                    'The support for PyTorch with Python 2.7 on Windows is very experimental.'
                )
                report(
                    'Please set the flag `FORCE_PY27_BUILD` to 1 to continue build.'
                )
                sys.exit(1)
            # /bigobj increases number of sections in .obj file, which is needed to link
            # against libraries in Python 2.7 under Windows
            extra_compile_args.append('/bigobj')
    else:
        extra_link_args = []
        extra_compile_args = [
            '-std=c++14',
            '-Wall',
            '-Wextra',
            '-Wno-strict-overflow',
            '-Wno-unused-parameter',
            '-Wno-missing-field-initializers',
            '-Wno-write-strings',
            '-Wno-unknown-pragmas',
            # This is required for Python 2 declarations that are deprecated in 3.
            '-Wno-deprecated-declarations',
            # Python 2.6 requires -fno-strict-aliasing, see
            # http://legacy.python.org/dev/peps/pep-3123/
            # We also depend on it in our code (even Python 3).
            '-fno-strict-aliasing',
            # Clang has an unfixed bug leading to spurious missing
            # braces warnings, see
            # https://bugs.llvm.org/show_bug.cgi?id=21629
            '-Wno-missing-braces',
        ]
        if check_env_flag('WERROR'):
            extra_compile_args.append('-Werror')

    library_dirs.append(lib_path)

    # we specify exact lib names to avoid conflict with lua-torch installs
    CAFFE2_LIBS = []

    main_compile_args = []
    main_libraries = ['shm', 'torch_python']
    main_link_args = []
    main_sources = ["torch/csrc/stub.cpp"]

    # Before the introduction of stub.cpp, _C.so and libcaffe2.so defined
    # some of the same symbols, and it was important for _C.so to be
    # loaded before libcaffe2.so so that the versions in _C.so got
    # used. This happened automatically because we loaded _C.so directly,
    # and libcaffe2.so was brought in as a dependency (though I suspect it
    # may have been possible to break by importing caffe2 first in the
    # same process).
    #
    # Now, libtorch_python.so and libcaffe2.so define some of the same
    # symbols. We directly load the _C.so stub, which brings both of these
    # in as dependencies. We have to make sure that symbols continue to be
    # looked up in libtorch_python.so first, by making sure it comes
    # before libcaffe2.so in the linker command.
    main_link_args.extend(CAFFE2_LIBS)

    if cmake_cache_vars['USE_CUDA']:
        library_dirs.append(os.path.dirname(cmake_cache_vars['CUDA_CUDA_LIB']))

    if build_type.is_debug():
        if IS_WINDOWS:
            extra_link_args.append('/DEBUG:FULL')
        else:
            extra_compile_args += ['-O0', '-g']
            extra_link_args += ['-O0', '-g']

    if build_type.is_rel_with_deb_info():
        if IS_WINDOWS:
            extra_link_args.append('/DEBUG:FULL')
        else:
            extra_compile_args += ['-g']
            extra_link_args += ['-g']

    def make_relative_rpath(path):
        # Per-platform rpath so the extension finds its bundled libs at runtime.
        if IS_DARWIN:
            return '-Wl,-rpath,@loader_path/' + path
        elif IS_WINDOWS:
            return ''
        else:
            return '-Wl,-rpath,$ORIGIN/' + path

    ################################################################################
    # Declare extensions and package
    ################################################################################

    extensions = []
    packages = find_packages(exclude=('tools', 'tools.*'))
    C = Extension("torch._C",
                  libraries=main_libraries,
                  sources=main_sources,
                  language='c++',
                  extra_compile_args=main_compile_args + extra_compile_args,
                  include_dirs=[],
                  library_dirs=library_dirs,
                  extra_link_args=extra_link_args + main_link_args + [make_relative_rpath('lib')])
    extensions.append(C)
    if not IS_WINDOWS:
        DL = Extension("torch._dl",
                       sources=["torch/csrc/dl.c"],
                       language='c')
        extensions.append(DL)

    # These extensions are built by cmake and copied manually in build_extensions()
    # inside the build_ext implementation
    extensions.append(
        Extension(name=str('caffe2.python.caffe2_pybind11_state'),
                  sources=[]),
    )
    if cmake_cache_vars['USE_CUDA']:
        extensions.append(
            Extension(name=str('caffe2.python.caffe2_pybind11_state_gpu'),
                      sources=[]),
        )
    if cmake_cache_vars['USE_ROCM']:
        extensions.append(
            Extension(name=str('caffe2.python.caffe2_pybind11_state_hip'),
                      sources=[]),
        )

    cmdclass = {
        'build_ext': build_ext,
        'clean': clean,
        'install': install,
    }
    entry_points = {
        'console_scripts': [
            'convert-caffe2-to-onnx = caffe2.python.onnx.bin.conversion:caffe2_to_onnx',
            'convert-onnx-to-caffe2 = caffe2.python.onnx.bin.conversion:onnx_to_caffe2',
        ]
    }

    return extensions, cmdclass, packages, entry_points
from tools.setup_helpers.nccl import USE_NCCL, USE_SYSTEM_NCCL, NCCL_LIB_DIR, \ NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB from tools.setup_helpers.mkldnn import (USE_MKLDNN, MKLDNN_LIBRARY, MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR) from tools.setup_helpers.nnpack import USE_NNPACK from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext from tools.setup_helpers.dist_check import USE_DISTRIBUTED, \ USE_DISTRIBUTED_MW, USE_GLOO_IBVERBS, USE_C10D ################################################################################ # Parameters parsed from environment ################################################################################ DEBUG = check_env_flag('DEBUG') IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') FULL_CAFFE2 = check_env_flag('FULL_CAFFE2') BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH') USE_CUDA_STATIC_LINK = check_env_flag('USE_CUDA_STATIC_LINK') NUM_JOBS = multiprocessing.cpu_count() max_jobs = os.getenv("MAX_JOBS") if max_jobs is not None: NUM_JOBS = min(NUM_JOBS, int(max_jobs)) ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE")