def run(self):
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_MKLDNN:
        print('-- Detected MKLDNN at ' + MKLDNN_LIBRARY + ', ' + MKLDNN_INCLUDE_DIR)
    else:
        print('-- Not using MKLDNN')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')

    generate_code(ninja_global)

    if WITH_NINJA:
        # before we start the normal build make sure all generated code
        # gets built
        ninja_global.run()

    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)

    # Copy the essential export library to compile C++ extensions.
    if IS_WINDOWS:
        build_temp = self.build_temp

        ext_filename = self.get_ext_filename('_C')
        lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

        export_lib = os.path.join(
            build_temp, 'torch', 'csrc', lib_filename).replace('\\', '/')

        build_lib = self.build_lib

        target_lib = os.path.join(
            build_lib, 'torch', 'lib', '_C.lib').replace('\\', '/')

        self.copy_file(export_lib, target_lib)

def run(self):
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')

    generate_code(ninja_global)

    if IS_WINDOWS:
        build_temp = self.build_temp
        build_dir = 'torch/csrc'

        ext_filename = self.get_ext_filename('_C')
        lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

        _C_LIB = os.path.join(build_temp, build_dir, lib_filename).replace('\\', '/')

        THNN.extra_link_args += [_C_LIB]
        if WITH_CUDA:
            THCUNN.extra_link_args += [_C_LIB]
        else:
            # To generate .obj files for AutoGPU for the export class,
            # the header file cannot be compiled directly, so it is copied
            # elsewhere and built as a source file.
            if not os.path.exists("torch/csrc/generated"):
                os.mkdir("torch/csrc/generated")
            if os.path.exists("torch/csrc/generated/AutoGPU_cpu_win.cpp"):
                os.remove("torch/csrc/generated/AutoGPU_cpu_win.cpp")
            shutil.copyfile("torch/csrc/cuda/AutoGPU.h",
                            "torch/csrc/generated/AutoGPU_cpu_win.cpp")

    if WITH_NINJA:
        # before we start the normal build make sure all generated code
        # gets built
        ninja_global.run()

    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)

def run(self):
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')

    generate_code(ninja_global)

    if IS_WINDOWS:
        build_temp = self.build_temp
        build_dir = 'torch/csrc'

        ext_filename = self.get_ext_filename('_C')
        lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

        _C_LIB = os.path.join(build_temp, build_dir, lib_filename).replace('\\', '/')

        THNN.extra_link_args += [_C_LIB]
        if WITH_CUDA:
            THCUNN.extra_link_args += [_C_LIB]
        else:
            # To generate .obj files for AutoGPU for the export class,
            # the header file cannot be compiled directly, so it is copied
            # elsewhere and built as a source file.
            if not os.path.exists("torch/csrc/generated"):
                os.mkdir("torch/csrc/generated")
            if os.path.exists("torch/csrc/generated/AutoGPU_cpu_win.cpp"):
                os.remove("torch/csrc/generated/AutoGPU_cpu_win.cpp")
            shutil.copyfile("torch/csrc/cuda/AutoGPU.h",
                            "torch/csrc/generated/AutoGPU_cpu_win.cpp")

    if WITH_NINJA:
        # before we start the normal build make sure all generated code
        # gets built
        ninja_global.run()

    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)

def run(self):
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')

    # Copy headers necessary to compile C++ extensions.
    self.copy_tree('torch/csrc', 'torch/lib/include/torch/csrc/')
    self.copy_tree('torch/lib/pybind11/include/pybind11/',
                   'torch/lib/include/pybind11')
    self.copy_file('torch/torch.h', 'torch/lib/include/torch/torch.h')

    generate_code(ninja_global)

    if WITH_NINJA:
        # before we start the normal build make sure all generated code
        # gets built
        ninja_global.run()

    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)

def run(self):
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_MKLDNN:
        print('-- Detected MKLDNN at ' + MKLDNN_LIBRARY + ', ' + MKLDNN_INCLUDE_DIR)
    else:
        print('-- Not using MKLDNN')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')

    generate_code(ninja_global)

    if WITH_NINJA:
        # before we start the normal build make sure all generated code
        # gets built
        ninja_global.run()

    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)
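
# Minimal sketch (not part of the setup.py code above) of how a build_ext
# override like the run() methods shown here is registered with setuptools,
# so that `python setup.py build_ext` invokes the custom run() and then falls
# through to the stock implementation. The names 'example_pkg' and
# 'example_pkg/_C.c' are hypothetical placeholders, not names from the source.
import setuptools
import setuptools.command.build_ext


class build_ext(setuptools.command.build_ext.build_ext):

    def run(self):
        # Custom pre-build steps would go here (printing build options,
        # generating code, etc., as in the versions above), followed by a
        # call into the base build_ext implementation.
        print('-- Running custom build_ext')
        setuptools.command.build_ext.build_ext.run(self)


setuptools.setup(
    name='example_pkg',
    version='0.0.1',
    ext_modules=[setuptools.Extension('example_pkg._C', ['example_pkg/_C.c'])],
    cmdclass={'build_ext': build_ext},
)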