def generate_code(ninja_global=None, declarations_path=None, nn_path=None, install_dir=None):
    """Run all build-time code generation: legacy THNN/THCUNN wrappers plus
    the ATen-based autograd and JIT sources.

    ``ninja_global`` is accepted for interface compatibility but unused here.
    Paths default to the in-tree locations when not supplied.
    """
    # cwrap depends on pyyaml, so we can't import it earlier
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.makedirs(output_dir)

    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir, 'tools/autograd')
    gen_jit_dispatch(declarations, jit_gen_dir, 'tools/jit/templates')
def generate_code(ninja_global=None):
    """Generate NN wrappers and the ATen-based autograd/JIT sources,
    delegating to the ninja registration path when a ninja build is active."""
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    here = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers()

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.mkdir(output_dir)

    declarations = 'torch/lib/tmp_install/share/ATen/Declarations.yaml'
    gen_autograd(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def generate_code(ninja_global=None, declarations_path=None, nn_path=None, install_dir=None):
    """Run build-time code generation (NN wrappers, autograd, JIT dispatch),
    or hand off to ninja registration when ``ninja_global`` is provided."""
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    here = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.makedirs(output_dir)

    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir, 'tools/autograd')
    gen_jit_dispatch(declarations, jit_gen_dir, 'tools/jit/templates')
def generate_code(ninja_global=None, declarations_path=None, nn_path=None):
    """Generate NN wrappers plus autograd/JIT sources, or register the work
    with ninja when ``ninja_global`` is given."""
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path)

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.mkdir(output_dir)

    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def generate_code(ninja_global=None, declarations_path=None, nn_path=None,
                  install_dir=None, subset=None):
    """Generate the ATen-based autograd and JIT sources.

    ``subset`` limits generation to "pybindings" (Python bindings only) or
    "libtorch" (C++ autograd + JIT dispatch only); a falsy subset runs both.
    ``ninja_global`` and ``nn_path`` are accepted for interface compatibility
    but unused here.
    """
    # cwrap depends on pyyaml, so we can't import it earlier
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.makedirs(output_dir)

    declarations = declarations_path or DECLARATIONS_PATH
    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations, autograd_gen_dir, 'tools/autograd')
    if subset == "libtorch" or not subset:
        gen_autograd(declarations, autograd_gen_dir, 'tools/autograd')
        gen_jit_dispatch(declarations, jit_gen_dir, 'tools/jit/templates')
def generate_code(ninja_global=None):
    """Run cwrap over the tensor-method and cuDNN declarations, then generate
    the ATen-based VariableType and JIT dispatch sources; delegates to ninja
    registration when ``ninja_global`` is provided."""
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    here = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    sys.path.append(repo_root)
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_variable_type import gen_variable_type
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch

    thp_plugin = THPPlugin()
    # Tensor method bindings from the generic .cwrap declarations.
    tensor_method_plugins = [
        ProcessorSpecificPlugin(),
        BoolOption(),
        thp_plugin,
        AutoGPU(condition='IS_CUDA'),
        ArgcountSortPlugin(),
        KwargsPlugin(),
        AssertNDim(),
        WrapDim(),
        Broadcast(),
    ]
    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=tensor_method_plugins)
    cwrap('torch/csrc/cudnn/cuDNN.cwrap',
          plugins=[CuDNNPlugin(), NullableArguments()])

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.mkdir(output_dir)

    declarations = 'torch/lib/tmp_install/share/ATen/Declarations.yaml'
    gen_variable_type(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def generate_code(ninja_global=None, declarations_path=None, nn_path=None,
                  install_dir=None, subset=None, disable_autograd=False,
                  selected_op_list_path=None, selected_op_list=None,
                  force_schema_registration=False):
    """Generate the ATen-based autograd and JIT dispatch sources.

    ``subset`` limits generation to "pybindings" or "libtorch"; a falsy
    subset runs both. Operator-selection arguments are forwarded to the JIT
    dispatch generator. ``ninja_global`` and ``nn_path`` are accepted for
    interface compatibility but unused here.
    """
    # cwrap depends on pyyaml, so we can't import it earlier
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch

    # Build ATen based Variable classes
    install_dir = install_dir or 'torch/csrc'
    autograd_gen_dir = os.path.join(install_dir, 'autograd', 'generated')
    jit_gen_dir = os.path.join(install_dir, 'jit', 'generated')
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.makedirs(output_dir)

    # When RUNFILES_DIR is set (Bazel-style runfiles), template sources live
    # under <runfiles>/pytorch; otherwise use repo-relative paths.
    runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

    declarations = declarations_path or DECLARATIONS_PATH
    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations, autograd_gen_dir, autograd_dir)
    if subset == "libtorch" or not subset:
        # TODO: add selected op mechanism in autograd to save binary size
        gen_autograd(
            declarations,
            autograd_gen_dir,
            autograd_dir,
            disable_autograd=disable_autograd,
        )
        gen_jit_dispatch(
            declarations,
            jit_gen_dir,
            tools_jit_templates,
            disable_autograd=disable_autograd,
            selected_op_list_path=selected_op_list_path,
            selected_op_list=selected_op_list,
            force_schema_registration=force_schema_registration,
        )
def generate_code(ninja_global=None):
    """Run cwrap over the tensor-method declarations and generate the
    ATen-based autograd/JIT sources; delegates to ninja registration when
    ``ninja_global`` is provided."""
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    here = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    sys.path.insert(0, repo_root)
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch

    thp_plugin = THPPlugin()
    # Tensor method bindings from the generic .cwrap declarations.
    tensor_method_plugins = [
        ProcessorSpecificPlugin(),
        BoolOption(),
        thp_plugin,
        AutoGPU(condition='IS_CUDA'),
        ArgcountSortPlugin(),
        KwargsPlugin(),
        AssertNDim(),
        WrapDim(),
        Broadcast(),
    ]
    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=tensor_method_plugins)

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for output_dir in (autograd_gen_dir, jit_gen_dir):
        if os.path.exists(output_dir):
            continue
        os.mkdir(output_dir)

    declarations = 'torch/lib/tmp_install/share/ATen/Declarations.yaml'
    gen_autograd(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def run(self):
    """build_ext hook: print the detected build configuration, run cwrap code
    generation, emit the ATen-based autograd/JIT sources, then run the stock
    setuptools build_ext."""
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        # Defined elsewhere in this file; presumably adjusts THD-related link
        # flags on the extension modules — confirm at its definition.
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')
    # Do we actually need this here?
    if WITH_NNPACK:
        nnpack_dir = NNPACK_LIB_PATHS[0]
        print('-- Detected NNPACK at ' + nnpack_dir)
    else:
        print('-- Not using NNPACK')
    # cwrap depends on pyyaml, so we can't import it earlier
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_variable_type import gen_variable_type
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()
    # Generate tensor method bindings from the .cwrap declaration files.
    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
        ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
        AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
        AssertNDim(), WrapDim(), Broadcast()
    ])
    cwrap('torch/csrc/cudnn/cuDNN.cwrap',
          plugins=[CuDNNPlugin(), NullableArguments()])
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_variable_type('torch/lib/build/ATen/ATen/Declarations.yaml',
                      autograd_gen_dir)
    gen_jit_dispatch('torch/lib/build/ATen/ATen/Declarations.yaml',
                     jit_gen_dir)
    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)
def run(self):
    """build_ext hook: print the detected build configuration, run cwrap code
    generation, emit the ATen-based autograd/JIT sources, apply Windows-only
    link fixups, then run the stock setuptools build_ext."""
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        # Defined elsewhere in this file; presumably adjusts THD-related link
        # flags on the extension modules — confirm at its definition.
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')
    # Do we actually need this here?
    if WITH_NNPACK:
        nnpack_dir = NNPACK_LIB_PATHS[0]
        print('-- Detected NNPACK at ' + nnpack_dir)
    else:
        print('-- Not using NNPACK')
    # cwrap depends on pyyaml, so we can't import it earlier
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_variable_type import gen_variable_type
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()
    # Generate tensor method bindings from the .cwrap declaration files.
    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
        ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
        AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
        AssertNDim(), WrapDim(), Broadcast()
    ])
    cwrap('torch/csrc/cudnn/cuDNN.cwrap',
          plugins=[CuDNNPlugin(), NullableArguments()])
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_variable_type('torch/lib/tmp_install/share/ATen/Declarations.yaml',
                      autograd_gen_dir)
    gen_jit_dispatch('torch/lib/tmp_install/share/ATen/Declarations.yaml',
                     jit_gen_dir)
    if IS_WINDOWS:
        # On Windows the NN extensions must link against the import library
        # produced for the _C extension, so derive its .lib path from the
        # build temp dir and the platform-specific extension filename.
        build_temp = self.build_temp
        build_dir = 'torch/csrc'
        ext_filename = self.get_ext_filename('_C')
        lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'
        _C_LIB = os.path.join(build_temp, build_dir, lib_filename).replace('\\', '/')
        THNN.extra_link_args += [_C_LIB]
        if WITH_CUDA:
            THCUNN.extra_link_args += [_C_LIB]
        else:
            # To generate .obj files for AutoGPU for the export class
            # a header file cannot build, so it has to be copied to someplace as a source file
            if os.path.exists("torch/csrc/generated/AutoGPU_cpu_win.cpp"):
                os.remove("torch/csrc/generated/AutoGPU_cpu_win.cpp")
            shutil.copyfile("torch/csrc/cuda/AutoGPU.h",
                            "torch/csrc/generated/AutoGPU_cpu_win.cpp")
    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)
def run(self):
    """build_ext hook: print the detected build configuration, run cwrap code
    generation, emit the ATen-based autograd/JIT sources, apply Windows-only
    link fixups, then run the stock setuptools build_ext."""
    # Print build options
    if WITH_NUMPY:
        print('-- Building with NumPy bindings')
    else:
        print('-- NumPy not found')
    if WITH_CUDNN:
        print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
    else:
        print('-- Not using cuDNN')
    if WITH_CUDA:
        print('-- Detected CUDA at ' + CUDA_HOME)
    else:
        print('-- Not using CUDA')
    if WITH_NCCL and WITH_SYSTEM_NCCL:
        print('-- Using system provided NCCL library at ' +
              NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
    elif WITH_NCCL:
        print('-- Building NCCL library')
    else:
        print('-- Not using NCCL')
    if WITH_DISTRIBUTED:
        print('-- Building with distributed package ')
        # Defined elsewhere in this file; presumably adjusts THD-related link
        # flags on the extension modules — confirm at its definition.
        monkey_patch_THD_link_flags()
    else:
        print('-- Building without distributed package')
    # Do we actually need this here?
    if WITH_NNPACK:
        nnpack_dir = NNPACK_LIB_PATHS[0]
        print('-- Detected NNPACK at ' + nnpack_dir)
    else:
        print('-- Not using NNPACK')
    # cwrap depends on pyyaml, so we can't import it earlier
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_variable_type import gen_variable_type
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()
    # Generate tensor method bindings from the .cwrap declaration files.
    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
        ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
        AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
        AssertNDim(), WrapDim(), Broadcast()
    ])
    cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
        CuDNNPlugin(), NullableArguments()
    ])
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_variable_type(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        autograd_gen_dir)
    gen_jit_dispatch(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        jit_gen_dir)
    if IS_WINDOWS:
        # On Windows the NN extensions must link against the import library
        # produced for the _C extension, so derive its .lib path from the
        # build temp dir and the platform-specific extension filename.
        build_temp = self.build_temp
        build_dir = 'torch/csrc'
        ext_filename = self.get_ext_filename('_C')
        lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'
        _C_LIB = os.path.join(build_temp, build_dir, lib_filename).replace('\\', '/')
        THNN.extra_link_args += [_C_LIB]
        if WITH_CUDA:
            THCUNN.extra_link_args += [_C_LIB]
        else:
            # To generate .obj files for AutoGPU for the export class
            # a header file cannot build, so it has to be copied to someplace as a source file
            if os.path.exists("torch/csrc/generated/AutoGPU_cpu_win.cpp"):
                os.remove("torch/csrc/generated/AutoGPU_cpu_win.cpp")
            shutil.copyfile("torch/csrc/cuda/AutoGPU.h",
                            "torch/csrc/generated/AutoGPU_cpu_win.cpp")
    # It's an old-style class in Python 2.7...
    setuptools.command.build_ext.build_ext.run(self)