def generate_code(ninja_global=None, declarations_path=None, nn_path=None, install_dir=None):
    """Run the build-time code generators: legacy NN wrappers, autograd
    Variable classes, and the JIT dispatch sources.

    NOTE(review): ninja_global is accepted but unused in this revision —
    confirm whether ninja registration was intended here.
    """
    # cwrap depends on pyyaml, so we can't import it earlier
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for gen_dir in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(gen_dir):
            os.makedirs(gen_dir)
    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir, 'tools/autograd')
    gen_jit_dispatch(declarations, jit_gen_dir, 'tools/jit/templates')
def build_libs(libs):
    """Build the requested third-party/native libraries via the platform
    build script, then regenerate NN wrappers if ATen was rebuilt.

    Exits the process with status 1 if the build script fails.
    """
    for lib in libs:
        assert lib in dep_libs, 'invalid lib: {}'.format(lib)

    # Pick the platform-appropriate driver script.
    if IS_WINDOWS:
        build_libs_cmd = ['torch\\lib\\build_libs.bat']
    else:
        build_libs_cmd = ['bash', 'torch/lib/build_libs.sh']

    build_env = os.environ.copy()
    build_env["PYTORCH_PYTHON"] = sys.executable

    # On non-Windows platforms, tell the script which CMake generator and
    # install command to use (ninja when available, make otherwise).
    if not IS_WINDOWS:
        if WITH_NINJA:
            build_env["CMAKE_GENERATOR"] = '-GNinja'
            build_env["CMAKE_INSTALL"] = 'ninja install'
        else:
            build_env['CMAKE_GENERATOR'] = ''
            build_env['CMAKE_INSTALL'] = 'make install'

    if WITH_SYSTEM_NCCL:
        build_env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
    if WITH_CUDA:
        build_env["CUDA_BIN_PATH"] = CUDA_HOME
        build_libs_cmd.append('--with-cuda')
    if WITH_CUDNN:
        build_env["CUDNN_LIB_DIR"] = CUDNN_LIB_DIR
        build_env["CUDNN_INCLUDE_DIR"] = CUDNN_INCLUDE_DIR

    if subprocess.call(build_libs_cmd + libs, env=build_env) != 0:
        sys.exit(1)

    if 'ATen' in libs:
        from tools.nnwrap import generate_wrappers as generate_nn_wrappers
        generate_nn_wrappers()
def build_libs(libs):
    """Drive the native dependency build (with optional CUDA, NNPACK and
    cuDNN support) and regenerate NN wrappers when ATen is among the libs.

    Exits the process with status 1 if the build script fails.
    """
    for lib in libs:
        assert lib in dep_libs, 'invalid lib: {}'.format(lib)

    # Windows uses a batch driver; everything else goes through bash.
    if IS_WINDOWS:
        cmd = ['torch\\lib\\build_libs.bat']
    else:
        cmd = ['bash', 'torch/lib/build_libs.sh']

    env = os.environ.copy()
    env["PYTORCH_PYTHON"] = sys.executable

    # Select the CMake generator / install command for the child script.
    if not IS_WINDOWS:
        if WITH_NINJA:
            env["CMAKE_GENERATOR"] = '-GNinja'
            env["CMAKE_INSTALL"] = 'ninja install'
        else:
            env['CMAKE_GENERATOR'] = ''
            env['CMAKE_INSTALL'] = 'make install'

    if WITH_SYSTEM_NCCL:
        env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
    if WITH_CUDA:
        env["CUDA_BIN_PATH"] = CUDA_HOME
        cmd.append('--with-cuda')
    if WITH_NNPACK:
        cmd.append('--with-nnpack')
    if WITH_CUDNN:
        env["CUDNN_LIB_DIR"] = CUDNN_LIB_DIR
        env["CUDNN_INCLUDE_DIR"] = CUDNN_INCLUDE_DIR

    if subprocess.call(cmd + libs, env=env) != 0:
        sys.exit(1)

    if 'ATen' in libs:
        from tools.nnwrap import generate_wrappers as generate_nn_wrappers
        generate_nn_wrappers()
def generate_code(ninja_global=None, declarations_path=None, nn_path=None, install_dir=None):
    """Generate NN wrappers, autograd Variable classes, and JIT dispatch
    code, or defer to ninja when a ninja global state is supplied.
    """
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    abs_file = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(abs_file)))
    sys.path.insert(0, repo_root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for out_dir in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir, 'tools/autograd')
    gen_jit_dispatch(declarations, jit_gen_dir, 'tools/jit/templates')
def generate_code(ninja_global=None, declarations_path=None, nn_path=None):
    """Generate NN wrappers, autograd Variable classes, and JIT dispatch
    code, or defer to ninja when a ninja global state is supplied.

    Args:
        ninja_global: when not None, register this step with ninja and
            return immediately instead of generating inline.
        declarations_path: path to ATen Declarations.yaml; falls back to
            the module-level DECLARATIONS_PATH.
        nn_path: forwarded to the THNN/THCUNN wrapper generator.
    """
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path)
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            # os.makedirs (not os.mkdir): create missing parent directories
            # too, so generation doesn't abort on a fresh output tree.
            os.makedirs(d)
    # Resolve the declarations path once instead of per call site.
    declarations = declarations_path or DECLARATIONS_PATH
    gen_autograd(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def generate_code(ninja_global=None):
    """Generate NN wrappers, autograd Variable classes, and JIT dispatch
    code, or defer to ninja when a ninja global state is supplied.

    Args:
        ninja_global: when not None, register this step with ninja and
            return immediately instead of generating inline.
    """
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers()
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            # os.makedirs (not os.mkdir): create missing parent directories
            # too, so generation doesn't abort on a fresh output tree.
            os.makedirs(d)
    # Single source of truth for the generated ATen declarations file,
    # previously duplicated at both call sites.
    declarations = 'torch/lib/tmp_install/share/ATen/Declarations.yaml'
    gen_autograd(declarations, autograd_gen_dir)
    gen_jit_dispatch(declarations, jit_gen_dir)
def run(self):
    """Build all native dependencies via build_all.sh, then generate the
    legacy NN wrappers. Exits with status 1 on build failure.
    """
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    cmd = ['bash', 'torch/lib/build_all.sh']
    if WITH_CUDA:
        cmd.append('--with-cuda')
    if subprocess.call(cmd) != 0:
        sys.exit(1)
    generate_nn_wrappers()
def run(self):
    """Build all native dependencies (optionally with CUDA, bundled NCCL
    and distributed support), then generate the legacy NN wrappers.
    Exits with status 1 on build failure.
    """
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    cmd = ['bash', 'torch/lib/build_all.sh']
    if WITH_CUDA:
        cmd.append('--with-cuda')
    # Only build the bundled NCCL when a system copy isn't being used.
    if WITH_NCCL and not SYSTEM_NCCL:
        cmd.append('--with-nccl')
    if WITH_DISTRIBUTED:
        cmd.append('--with-distributed')
    if subprocess.call(cmd) != 0:
        sys.exit(1)
    generate_nn_wrappers()
def run(self):
    """Build all native dependencies, then generate the legacy NN
    wrappers. Exits with status 1 on build failure.
    """
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    # Collect optional feature flags for the build script.
    extra_flags = []
    if WITH_CUDA:
        extra_flags.append('--with-cuda')
    # Bundled NCCL is only built when no system NCCL is in use.
    if WITH_NCCL and not SYSTEM_NCCL:
        extra_flags.append('--with-nccl')
    if WITH_DISTRIBUTED:
        extra_flags.append('--with-distributed')
    build_all_cmd = ['bash', 'torch/lib/build_all.sh'] + extra_flags
    if subprocess.call(build_all_cmd) != 0:
        sys.exit(1)
    generate_nn_wrappers()
def run(self):
    """Assemble the full dependency-library list for this configuration,
    build it, and then generate the legacy NN wrappers.
    """
    # Core CPU libraries are always built.
    libs = ['TH', 'THS', 'THNN']
    if WITH_CUDA:
        libs.extend(['THC', 'THCS', 'THCUNN'])
    # Bundled NCCL is only built when no system NCCL is in use.
    if WITH_NCCL and not SYSTEM_NCCL:
        libs.append('nccl')
    libs.extend(['THPP', 'libshm', 'ATen', 'nanopb'])
    if WITH_DISTRIBUTED:
        # gloo is Linux-only; THD is built for any distributed target.
        if sys.platform.startswith('linux'):
            libs.append('gloo')
        libs.append('THD')
    build_libs(libs)
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    generate_nn_wrappers()
def build_libs(libs):
    """Build the requested dependency libraries through build_libs.sh,
    then regenerate NN wrappers when THNN/THCUNN were rebuilt.

    Exits the process with status 1 if the build script fails.
    """
    for lib in libs:
        assert lib in dep_libs, 'invalid lib: {}'.format(lib)

    cmd = ['bash', 'torch/lib/build_libs.sh']
    env = os.environ.copy()
    env["PYTORCH_PYTHON"] = sys.executable
    if WITH_SYSTEM_NCCL:
        env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
    if WITH_CUDA:
        env["CUDA_BIN_PATH"] = CUDA_HOME
        cmd.append('--with-cuda')

    if subprocess.call(cmd + libs, env=env) != 0:
        sys.exit(1)

    # The NN wrappers are derived from THNN/THCUNN, so only regenerate
    # them when one of those libraries was part of this build.
    if 'THNN' in libs or 'THCUNN' in libs:
        from tools.nnwrap import generate_wrappers as generate_nn_wrappers
        generate_nn_wrappers()