Example #1
File: setup.py Project: daadaada/triton
    def build_extension(self, ext):
        #self.debug = True
        extdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.path)))
        # python directories
        python_include_dirs = distutils.sysconfig.get_python_inc()
        python_lib_dirs = distutils.sysconfig.get_config_var('LIBDIR')
        torch_include_dirs = include_paths(True)
        torch_library_dirs = library_paths(True)
        cxx11abi = str(int(torch._C._GLIBCXX_USE_CXX11_ABI))
        cmake_args = [
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
            '-DBUILD_TESTS=OFF',
            '-DBUILD_PYTHON_MODULE=ON',
            #'-DPYTHON_EXECUTABLE=' + sys.executable,
            #'-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON',
            '-DPYTHON_INCLUDE_DIRS=' +
            ';'.join([python_include_dirs] + torch_include_dirs),
            '-DPYTHON_LINK_DIRS=' + ';'.join(torch_library_dirs),
            '-DTORCH_CXX11_ABI=' + cxx11abi,
            '-DTORCH_LIBRARIES=c10;c10_cuda;torch;torch_cuda;torch_cpu;torch_python;triton',
            '-DLLVM_CONFIG=' + find_llvm()
        ]
        # configuration
        cfg = 'Debug' if self.debug else 'Release'
        cfg = 'Release'  # unconditionally overrides the debug setting above
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += [
                '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
                    cfg.upper(), extdir)
            ]
            if sys.maxsize > 2**32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j4']

        env = os.environ.copy()
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        sourcedir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'src'))
        subprocess.check_call(['cmake', sourcedir] + cmake_args,
                              cwd=self.build_temp,
                              env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args,
                              cwd=self.build_temp)
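
Note: this build_extension assumes a companion CMakeExtension stub defined elsewhere in the same setup.py, whose path attribute feeds get_ext_fullpath(). A minimal sketch of that stub (an assumption based on the usual CMake-in-setuptools pattern, not necessarily triton's exact code):

import os
from setuptools import Extension


class CMakeExtension(Extension):
    """Source-less extension: the actual build is delegated to CMake."""

    def __init__(self, name, path, sourcedir=''):
        # `path` (assumed attribute) is the dotted module path that
        # build_extension later passes to get_ext_fullpath().
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
        self.path = path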
Example #2
    def build_cmake(self, extension: CMakeExtension):
        """
        The steps required to build the extension
        """
        build_dir = pathlib.Path('.'.join([self.build_temp, extension.name]))

        build_dir.mkdir(parents=True, exist_ok=True)
        install_dir = TORCH_CCL_PATH

        # Now that the necessary directories are created, build
        my_env = os.environ.copy()

        build_options = {
            # The value cannot be easily obtained in CMakeLists.txt.
            'PYTHON_INCLUDE_DIRS':
            str(distutils.sysconfig.get_python_inc()),
            'PYTORCH_INCLUDE_DIRS':
            CMakeExtension.convert_cmake_dirs(include_paths()),
            'PYTORCH_LIBRARY_DIRS':
            CMakeExtension.convert_cmake_dirs(library_paths()),
        }

        extension.generate(build_options, my_env, build_dir, install_dir)

        max_jobs = os.getenv('MAX_JOBS', str(multiprocessing.cpu_count()))
        build_args = ['-j', max_jobs]
        check_call(['make', 'torch_ccl'] + build_args,
                   cwd=str(build_dir),
                   env=my_env)
        check_call(['make', 'install'], cwd=str(build_dir), env=my_env)
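
Note: CMakeExtension.convert_cmake_dirs is project-specific and not shown here; presumably it just turns a Python path list into CMake's semicolon-separated list format. A one-line sketch under that assumption:

def convert_cmake_dirs(paths):
    # CMake expects list-valued cache entries as a single ';'-joined string.
    return ';'.join(paths)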
Example #3
def get_jittable_extension():
    ext_modules = []
    extensions_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  'brevitas', 'csrc')

    sources = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    include_dirs = [extensions_dir] + include_paths()
    define_macros = []
    libraries = []
    library_dirs = []
    extra_compile_args = {}

    if sys.platform == 'win32':
        define_macros += [('brevitas_EXPORTS', None)]
        extra_compile_args.setdefault('cxx', [])
        extra_compile_args['cxx'].append('/MP')
        library_dirs += library_paths()
        libraries.append('c10')
        libraries.append('torch')
        libraries.append('torch_python')
        libraries.append('_C')

    jittable_ext = JittableExtension('brevitas._C',
                                     language='c++',
                                     sources=sources,
                                     libraries=libraries,
                                     library_dirs=library_dirs,
                                     include_dirs=include_dirs,
                                     define_macros=define_macros,
                                     extra_compile_args=extra_compile_args)
    ext_modules.append(jittable_ext)
    return ext_modules
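
Note: JittableExtension is brevitas-specific and not shown here. A minimal stand-in (an assumption for illustration only) would be a thin setuptools.Extension subclass that pins the language:

from setuptools import Extension


class JittableExtension(Extension):
    """Stand-in sketch: a plain Extension that defaults to C++."""

    def __init__(self, name, language='c++', **kwargs):
        kwargs['language'] = language
        super().__init__(name, **kwargs)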
Example #4
def main():
    extra_compile_args = []
    extra_link_args = []

    grpc_objects = [
        f"{PREFIX}/lib/libgrpc++.a",
        f"{PREFIX}/lib/libgrpc.a",
        f"{PREFIX}/lib/libgpr.a",
        f"{PREFIX}/lib/libaddress_sorting.a",
    ]

    include_dirs = cpp_extension.include_paths() + [
        np.get_include(),
        f"{PREFIX}/include",
    ]
    libraries = []

    if sys.platform == "darwin":
        extra_compile_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
        extra_link_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]

        # Relevant only when c-ares is not embedded in grpc, e.g. when
        # installing grpc via homebrew.
        libraries.append("cares")
    elif sys.platform == "linux":
        libraries.append("z")

    grpc_objects.append(f"{PREFIX}/lib/libprotobuf.a")

    libtorchbeast = cpp_extension.CppExtension(
        name="libtorchbeast._C",
        sources=[
            "libtorchbeast/libtorchbeast.cc",
            "libtorchbeast/actorpool.cc",
            "libtorchbeast/rpcenv.cc",
            "libtorchbeast/rpcenv.pb.cc",
            "libtorchbeast/rpcenv.grpc.pb.cc",
        ],
        include_dirs=include_dirs,
        libraries=libraries,
        language="c++",
        extra_compile_args=["-std=c++17"] + extra_compile_args,
        extra_link_args=extra_link_args,
        extra_objects=grpc_objects,
    )

    setuptools.setup(
        name="libtorchbeast",
        packages=["libtorchbeast"],
        version="0.0.14",
        ext_modules=[libtorchbeast],
        cmdclass={"build_ext": build_ext},
        test_suite="setup.test_suite",
        install_requires=[
            'setuptools'
        ],  # HACK: any package is ok, but somehow must not be empty
    )
Example #5
def is_torch_cuda(build_ext, include_dirs, extra_compile_args):
    try:
        from torch.utils.cpp_extension import include_paths
        test_compile(build_ext, 'test_torch_cuda', include_dirs=include_dirs + include_paths(cuda=True),
                     extra_compile_preargs=extra_compile_args, code=textwrap.dedent('''\
            #include <THC/THC.h>
            void test() {
            }
            '''))
        return True
    except (CompileError, LinkError, EnvironmentError):
        print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
        return False
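
Note: test_compile is a helper defined in the surrounding setup.py (the horovod-style probe pattern); a rough sketch under that assumption:

import os

def test_compile(build_ext, name, code, include_dirs=None,
                 extra_compile_preargs=None):
    # Write the snippet to a scratch source file and try to compile it;
    # distutils raises CompileError on failure, which the caller catches.
    tmp_dir = os.path.join(build_ext.build_temp, 'test_compile')
    os.makedirs(tmp_dir, exist_ok=True)
    source = os.path.join(tmp_dir, name + '.cc')
    with open(source, 'w') as f:
        f.write(code)
    build_ext.compiler.compile([source], output_dir=tmp_dir,
                               include_dirs=include_dirs or [],
                               extra_preargs=extra_compile_preargs or [])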
Example #6
def get_python_c_module():
    main_compile_args = []
    main_libraries = ['torch_ccl']
    main_link_args = []
    main_sources = ["torch_ccl/csrc/_C.cpp"]
    lib_path = os.path.join(TORCH_CCL_PATH, "lib")
    library_dirs = [lib_path]
    include_path = os.path.join(CWD, "src")
    include_dirs = include_paths()
    include_dirs.append(include_path)
    extra_link_args = []
    extra_compile_args = [
        '-Wall',
        '-Wextra',
        '-Wno-strict-overflow',
        '-Wno-unused-parameter',
        '-Wno-missing-field-initializers',
        '-Wno-write-strings',
        '-Wno-unknown-pragmas',
        # This is required for Python 2 declarations that are deprecated in 3.
        '-Wno-deprecated-declarations',
        # Python 2.6 requires -fno-strict-aliasing, see
        # http://legacy.python.org/dev/peps/pep-3123/
        # We also depend on it in our code (even Python 3).
        '-fno-strict-aliasing',
        # Clang has an unfixed bug leading to spurious missing
        # braces warnings, see
        # https://bugs.llvm.org/show_bug.cgi?id=21629
        '-Wno-missing-braces',
    ]

    def make_relative_rpath(path):
        return '-Wl,-rpath,$ORIGIN/' + path

    _c_module = Extension("torch_ccl._C",
                          libraries=main_libraries,
                          sources=main_sources,
                          language='c',
                          extra_compile_args=main_compile_args +
                          extra_compile_args,
                          include_dirs=include_dirs,
                          library_dirs=library_dirs,
                          extra_link_args=extra_link_args + main_link_args +
                          [make_relative_rpath('lib')])

    return _c_module
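
The '$ORIGIN' rpath tells the dynamic loader to search for libtorch_ccl.so in a 'lib' directory next to the built _C module itself, which keeps the installed package relocatable. A hedged usage sketch (TORCH_CCL_PATH and CWD are assumed to be defined earlier in the same setup.py):

from setuptools import setup

setup(
    name='torch_ccl',
    ext_modules=[get_python_c_module()],
)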
Example #7
def HIPExtension(name, sources, *args, **kwargs):
    include_dirs = kwargs.get('include_dirs', [])
    include_dirs += include_paths() + [
        # non-public includes are needed for using hip + pytorch
        # default paths for a docker-based pytorch installation
        os.getenv("PYTORCH_HIP_INCLUDE", "/pytorch/torch/include/"),
        os.getenv("PYTORCH_HIP_INCLUDE2", "/pytorch/aten/src/"),
        os.getenv("ROCM_INCLUDE", "/opt/rocm/include/"),
        os.getenv("HIPRAND_INCLUDE", "/opt/rocm/hiprand/include/"),
        os.getenv("ROCRAND_INCLUDE", "/opt/rocm/rocrand/include/"),
    ]
    kwargs['include_dirs'] = include_dirs
    kwargs['language'] = 'c++'

    # combine the nvcc and cxx compile args, since all sources will be
    # treated as C++ by the hipcc compiler
    extra_compile_args_dict = kwargs.get('extra_compile_args', {})
    extra_compile_args_list = extra_compile_args_dict.get(
        'cxx', []) + extra_compile_args_dict.get('nvcc', [])
    kwargs['extra_compile_args'] = extra_compile_args_list
    return Extension(name, sources, *args, **kwargs)
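
A usage sketch (the module name and source file below are hypothetical): since hipcc compiles every source as C++, the usual {'cxx': ..., 'nvcc': ...} dict collapses into one flat flag list:

ext = HIPExtension(
    'my_op._C',                       # hypothetical module name
    sources=['csrc/my_op_hip.cpp'],   # hypothetical source file
    extra_compile_args={'cxx': ['-O3'], 'nvcc': ['-DUSE_HIP']},
)
print(ext.extra_compile_args)  # ['-O3', '-DUSE_HIP'], flattened for hipcc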
Example #8
    def __init__(self, name, sourcedir=''):
        kwargs = dict()

        include_dirs = kwargs.get('include_dirs', [])
        include_dirs += include_paths()
        kwargs['include_dirs'] = include_dirs

        if sys.platform == 'win32':
            library_dirs = kwargs.get('library_dirs', [])
            library_dirs += library_paths()
            kwargs['library_dirs'] = library_dirs

            libraries = kwargs.get('libraries', [])
            libraries.append('caffe2')
            libraries.append('_C')
            kwargs['libraries'] = libraries

        kwargs['language'] = 'c++'

        Extension.__init__(self, name, sources=[], **kwargs)
        self.sourcedir = os.path.abspath(sourcedir)
Example #9
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools

from torch.utils import cpp_extension

print(cpp_extension.include_paths())

__version__ = '0.0.1'

extensions = [
    cpp_extension.CppExtension(
        'src.qp_fast',
        ["src/qp_fast.cpp"],
        language='c++',
        extra_compile_args=['-std=c++17'],
    ),
]

setup(name='latent_decision_tree',
      version=__version__,
      author="VZ,MK,VN",
      ext_modules=extensions,
      setup_requires=['pybind11>=2.5.0'],
      cmdclass={'build_ext': cpp_extension.BuildExtension},
      zip_safe=False)
Example #10
File: setup.py Project: v-qjqs/mmcv
def get_extensions():
    extensions = []

    if os.getenv('MMCV_WITH_TRT', '0') != '0':
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(
            os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)

        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
        library_dirs += library_paths(cuda=True)

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        return extensions

    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        # the new parrots op implementation does not use MMCV_USE_PARROTS
        # define_macros = [('MMCV_USE_PARROTS', None)]
        define_macros = []
        include_dirs = []
        op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') +\
            glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {
            'nvcc': [cuda_args] if cuda_args else [],
            'cxx': [],
        }
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            extra_compile_args['nvcc'] += [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            cuda=True,
            pytorch=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension

        # prevent ninja from using too many resources
        os.environ.setdefault('MAX_JOBS', '4')
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_dirs = []

        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = True if ((torch.version.hip is not None) and
                                       (ROCM_HOME is not None)) else False
        except ImportError:
            pass

        project_dir = 'mmcv/ops/csrc/'
        if is_rocm_pytorch:
            from torch.utils.hipify import hipify_python

            hipify_python.hipify(
                project_directory=project_dir,
                output_directory=project_dir,
                includes='mmcv/ops/csrc/*',
                show_detailed=True,
                is_pytorch_extension=True,
            )
            define_macros += [('MMCV_WITH_CUDA', None)]
            define_macros += [('HIP_DIFF', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/hip/*')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
        elif torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))

        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)

    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
        ext_name = 'mmcv._ext_ort'
        from torch.utils.cpp_extension import library_paths, include_paths
        import onnxruntime
        library_dirs = []
        libraries = []
        include_dirs = []
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))

        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
                                                          '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    return extensions
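
Note: EXT_TYPE is set near the top of mmcv's setup.py and distinguishes a parrots build from a regular PyTorch build, roughly along these lines (a simplified sketch):

try:
    import torch
    if torch.__version__ == 'parrots':
        EXT_TYPE = 'parrots'
    else:
        EXT_TYPE = 'pytorch'
except ModuleNotFoundError:
    EXT_TYPE = ''
    print('Skip building ext ops due to the absence of torch.')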
Example #11
File: __init__.py Project: reiyw/keops
import pykeops

##########################################################
# Search for Pytorch and a GPU

torch_version_required = '1.0'

# is torch installed?
import torch
from torch.utils.cpp_extension import include_paths

include_dirs = include_paths()[0:2]

if torch.__version__ < torch_version_required:
    raise ImportError('The pytorch version should be >=' +
                      torch_version_required)

pykeops.gpu_available = torch.cuda.is_available()  # use torch to detect gpu
pykeops.torch_found = True

default_dtype = 'float32'

##########################################################
# Import pyKeOps routines

from .generic.generic_red import Genred
from .operations import KernelSolve
from .kernel_product.kernels import Kernel, kernel_product, kernel_formulas
from .generic.generic_ops import generic_sum, generic_logsumexp, generic_argmin, generic_argkmin
from .kernel_product.formula import Formula
from pykeops.common.lazy_tensor import LazyTensor, Vi, Vj, Pm
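
Note: the torch.__version__ check above compares strings lexicographically ('1.10' < '1.9' is True as strings), so it misorders multi-digit components. A more robust sketch using the packaging library:

from packaging.version import Version

if Version(torch.__version__) < Version(torch_version_required):
    raise ImportError('The pytorch version should be >=' +
                      torch_version_required)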
Example #12
if compile_test('bzlib.h', 'bz2'):
    compile_args.append('-DHAVE_BZLIB')
    ext_libs.append('bz2')

if compile_test('lzma.h', 'lzma'):
    compile_args.append('-DHAVE_XZLIB')
    ext_libs.append('lzma')

third_party_libs = ["kenlm", "openfst-1.6.7/src/include", "ThreadPool", "boost_1_67_0", "utf8"]
compile_args.extend(['-DINCLUDE_KENLM', '-DKENLM_MAX_ORDER=6'])
lib_sources = glob.glob('third_party/kenlm/util/*.cc') + glob.glob('third_party/kenlm/lm/*.cc') + glob.glob(
    'third_party/kenlm/util/double-conversion/*.cc') + glob.glob('third_party/openfst-1.6.7/src/lib/*.cc')
lib_sources = [fn for fn in lib_sources if not (fn.endswith('main.cc') or fn.endswith('test.cc'))]

third_party_includes = [os.path.realpath(os.path.join("third_party", lib)) for lib in third_party_libs]
ctc_sources = glob.glob('ctcdecode/src/*.cpp')


extension = CppExtension(
    name='ctcdecode._ext.ctc_decode',
    package=True,
    with_cuda=False,
    sources=ctc_sources + lib_sources,
    include_dirs=third_party_includes + include_paths(),
    libraries=ext_libs,
    extra_compile_args=compile_args,
    language='c++')


Example #13
File: setup.py Project: jinyx728/CS229
######################################################################
# Copyright 2019. Zhenglin Geng.
# This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
######################################################################
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension, include_paths
import os
from os.path import join

ecos_dir = '../ecos'
eigen_dir = '../eigen'
cmk_dir = 'build_cmk'
# cmk_dir='build_dbg'
print(include_paths())
print('cmk_dir', cmk_dir)

# setup(name='cvx_opt_cpp',
#       ext_modules=[CppExtension('cvx_opt_cpp', ['cvx_opt_py.cpp','cvx_opt_utils.cpp','cvx_opt_forward.cpp','cvx_opt_backward.cpp','ecos_opt.cpp','forward_opt.cpp','backward_opt.cpp'],
#         include_dirs=[join(ecos_dir,'include'),join(ecos_dir,'external/ldl/include'),join(ecos_dir,'external/amd/include'),join(ecos_dir,'external/SuiteSparse_config'),eigen_dir],
#         libraries=['ecos'],
#         library_dirs=[ecos_dir],
#       	extra_compile_args=['-DLDL_LONG', '-DDLONG'])],
#       cmdclass={'build_ext': BuildExtension})

setup(name='cvx_opt_cpp',
      ext_modules=[
          CppExtension('cvx_opt_cpp', ['cvx_opt_py.cpp'],
                       include_dirs=[
                           join(ecos_dir, 'include'),
                           join(ecos_dir, 'external/ldl/include'),
                           join(ecos_dir, 'external/amd/include'),
Example #14
def get_extensions():
    extensions = []

    if os.getenv('MMCV_WITH_TRT', '0') != '0':
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(
            os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        kwargs = {}
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)

        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
        library_dirs += library_paths(cuda=True)

        kwargs['library_dirs'] = library_dirs
        kwargs['libraries'] = libraries

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        return extensions

    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        define_macros = [('MMCV_USE_PARROTS', None)]
        op_files = glob.glob('./mmcv/ops/csrc/parrots/*')
        include_path = os.path.abspath('./mmcv/ops/csrc')
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=[include_path],
            define_macros=define_macros,
            extra_compile_args={
                'nvcc': [cuda_args] if cuda_args else [],
                'cxx': [],
            },
            cuda=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension

        # prevent ninja from using too many resources
        os.environ.setdefault('MAX_JOBS', '4')
        define_macros = []
        extra_compile_args = {'cxx': []}

        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*')
            extension = CUDAExtension
        else:
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp')
            extension = CppExtension

        include_path = os.path.abspath('./mmcv/ops/csrc')
        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=[include_path],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)

    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
        ext_name = 'mmcv._ext_ort'
        from torch.utils.cpp_extension import library_paths, include_paths
        import onnxruntime
        library_dirs = []
        libraries = []
        include_dirs = []
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        kwargs = {}
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))
        include_dirs += include_paths(cuda=True)

        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
                                                          '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            library_dirs += library_paths(cuda=True)
        else:
            library_dirs += library_paths(cuda=False)

        kwargs['library_dirs'] = library_dirs
        kwargs['libraries'] = libraries

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    return extensions
Example #15
def get_extensions():
    extensions = []
    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        return extensions
    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        define_macros = [('MMCV_USE_PARROTS', None)]
        op_files = glob.glob('./mmcv/ops/csrc/parrots/*')
        include_path = os.path.abspath('./mmcv/ops/csrc')
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=[include_path],
            define_macros=define_macros,
            extra_compile_args={
                'nvcc': [cuda_args] if cuda_args else [],
                'cxx': [],
            },
            cuda=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CUDAExtension, CppExtension
        os.environ.setdefault('MAX_JOBS', '4')
        define_macros = []
        extra_compile_args = {'cxx': []}
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*')
            extension = CUDAExtension
        else:
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp')
            extension = CppExtension
        include_path = os.path.abspath('./mmcv/ops/csrc')
        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=[include_path],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)
    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
        ext_name = 'mmcv._ext_ort'
        from torch.utils.cpp_extension import library_paths, include_paths
        import onnxruntime
        library_dirs = []
        libraries = []
        include_dirs = []
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        kwargs = {}
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))
        include_dirs += include_paths(cuda=True)
        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
                                                          '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            library_dirs += library_paths(cuda=True)
        else:
            library_dirs += library_paths(cuda=False)
        kwargs['library_dirs'] = library_dirs
        kwargs['libraries'] = libraries
        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)
    return extensions
Example #16
         CTLDIR+'img/singleviewdata.cpp',
         CTLDIR+'mat/homography.cpp',
         CTLDIR+'mat/matrix_algorithm.cpp',
         CTLDIR+'mat/projectionmatrix.cpp',
         CTLDIR+'ocl/openclconfig.cpp',
         CTLDIR+'ocl/cldirfileloader.cpp',
         CTLDIR+'processing/radontransform2d.cpp',
         'src/'+'radon2d_kernel.cpp',
         'src/'+'parallelsetup.cpp',
         'src/'+'simple_backprojector_kernel.cpp',
         'src/'+'simplebackprojector.cpp',
         'src/'+'pybind_radon.cpp'],
        include_dirs=[
            'include',
            CTLDIR,
        ] + cpp_extension.include_paths(cuda=True),
        language='c++',
        libraries=['OpenCL', 'c10', 'caffe2', 'torch', 'torch_python', '_C'],
        library_dirs=cpp_extension.library_paths(cuda=True),
        define_macros=[('OCL_CONFIG_MODULE_AVAILABLE', None), ('NOQT', None)]
    ),
]

setup(
    name='ctl',
    version=__version__,
    author='Philipp Ernst',
    author_email='*****@*****.**',
    description='2D Radon transform using CTL',
    ext_modules=ext_modules,
    cmdclass={'build_ext': cpp_extension.BuildExtension},
Example #17
def get_extensions():
    extensions = []

    import numpy
    default_includes = [numpy.get_include()]
    from torch.utils.cpp_extension import include_paths, library_paths
    default_includes += include_paths(cuda=torch.cuda.is_available())
    if os.name == 'nt':
        default_includes.append(
            os.path.join(os.path.dirname(os.path.dirname(sys.executable)),
                         'include'))

    if os.getenv('MMCV_WITH_TRT', '0') != '0':

        # The following text-style strings are from the colorama package
        bright_style, reset_style = '\x1b[1m', '\x1b[0m'
        red_text, blue_text = '\x1b[31m', '\x1b[34m'
        white_background = '\x1b[107m'

        msg = white_background + bright_style + red_text
        msg += 'DeprecationWarning: ' + \
            'Custom TensorRT Ops will be deprecated in future. '
        msg += blue_text + \
            'Welcome to use the unified model deployment toolbox '
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)

        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = default_includes
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(
            os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)

        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
        # prevent cub/thrust conflicts with other python libraries;
        # for more context see issue #1454
        extra_compile_args['nvcc'] += ['-Xcompiler=-fno-gnu-unique']
        library_dirs += library_paths(cuda=True)

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        return extensions

    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension

        # the new parrots op implementation does not use MMCV_USE_PARROTS
        # define_macros = [('MMCV_USE_PARROTS', None)]
        define_macros = []
        include_dirs = default_includes
        op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') +\
            glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') +\
            glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {
            'nvcc': [cuda_args, '-std=c++14'] if cuda_args else ['-std=c++14'],
            'cxx': ['-std=c++14'],
        }
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            extra_compile_args['nvcc'] += [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            cuda=True,
            pytorch=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension

        # prevent ninja from using too many resources
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, num_cpu - 1)
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4

        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []

        # Before PyTorch 1.8.0, `cxx` was a required key when compiling
        # CUDA code: even with no flags to pass, users still had to supply
        # an empty list. Since PyTorch 1.8.0 it has a default value, so the
        # empty list is no longer needed.
        # More details at https://github.com/pytorch/pytorch/pull/45956
        extra_compile_args = {'cxx': []}

        # Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
        # c++14 features, '-std=c++14' must be added here. On Windows,
        # however, some standard libraries depend on c++17 or higher, and
        # the compiler picks an appropriate standard for those cpp files
        # on its own, so the flag is not added there.
        if platform.system() != 'Windows':
            extra_compile_args['cxx'] = ['-std=c++14']

        include_dirs = default_includes

        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = True if ((torch.version.hip is not None) and
                                       (ROCM_HOME is not None)) else False
        except ImportError:
            pass

        if os.getenv('FORCE_NO_ROCM', '0') == '1':
            is_rocm_pytorch = False

        if is_rocm_pytorch or torch.cuda.is_available() or os.getenv(
                'FORCE_CUDA', '0') == '1':
            if is_rocm_pytorch:
                define_macros += [('HIP_DIFF', None)]
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))

        # Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
        # c++14 features, '-std=c++14' must be added here. On Windows,
        # however, some standard libraries depend on c++17 or higher, and
        # the compiler picks an appropriate standard for those cpp files
        # on its own, so the flag is not added there.
        if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
            extra_compile_args['nvcc'] += ['-std=c++14']

        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)

    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':

        # The following text-style strings are from the colorama package
        bright_style, reset_style = '\x1b[1m', '\x1b[0m'
        red_text, blue_text = '\x1b[31m', '\x1b[34m'
        white_background = '\x1b[107m'

        msg = white_background + bright_style + red_text
        msg += 'DeprecationWarning: ' + \
            'Custom ONNXRuntime Ops will be deprecated in future. '
        msg += blue_text + \
            'Welcome to use the unified model deployment toolbox '
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_ort'
        import onnxruntime
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = default_includes
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))

        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
                                                          '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)

        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)

    return extensions
Example #18
    def __init__(self,
                 halide_root,
                 name,
                 *args,
                 generators=[],
                 gen_cxx="c++",
                 gen_cxxflags=None,
                 extra_sources=[],
                 **kwargs):
        sources = extra_sources
        cuda = False
        for g in generators:
            # Activate cuda in the wrapper whenever we have an op that requires it
            cuda = cuda or g.cuda

        print("CUDA?", cuda)

        compile_args = kwargs.get('extra_compile_args', [])
        compile_args += ["-std=c++14", "-g"]
        if platform.system() == "Darwin":  # on osx libstdc++ causes trouble
            compile_args += ["-stdlib=libc++"]
        kwargs["extra_compile_args"] = compile_args

        include_dirs = kwargs.get('include_dirs', [])
        library_dirs = kwargs.get('library_dirs', [])
        libraries = kwargs.get('libraries', [])

        include_dirs += include_paths(cuda=cuda)
        include_dirs.append(os.path.join(halide_root, "include"))

        if cuda:
            libraries.append('cudart')
            libraries.append('cuda')

        if platform.system() == 'Windows':
            library_dirs += library_paths()
            kwargs['library_dirs'] = library_dirs

            libraries = kwargs.get('libraries', [])
            libraries.append('c10')
            if cuda:
                libraries.append('c10_cuda')
            libraries.append('torch')
            libraries.append('torch_python')
            libraries.append('_C')
            kwargs['libraries'] = libraries

        kwargs['language'] = 'c++'

        if cuda:
            library_dirs += library_paths(cuda=True)

        kwargs['include_dirs'] = include_dirs
        kwargs['library_dirs'] = library_dirs
        kwargs['libraries'] = libraries

        super(HalidePyTorchExtension, self).__init__(name, sources, *args,
                                                     **kwargs)

        # Group generators by source file, so we compile those only once
        self.generators = {}
        self.cuda = cuda
        for g in generators:
            if g.gen_source not in self.generators:
                self.generators[g.gen_source] = []
            self.generators[g.gen_source].append(g)

        self.gen_cxx = gen_cxx
        self.gen_cxxflags = self._get_gen_cxxflags(gen_cxxflags, halide_root)
        self.gen_ldflags = self._get_gen_ldflags(None)
        self.gen_hlsyslibs = self._get_hlsyslibs(None)
        self.gen_deps = self._get_gen_deps(None, halide_root)
        self.get_include = self._get_gen_inc(include_dirs)
Example #19
File: setup.py Project: jshilong/mmcv
def get_extensions():
    extensions = []

    if os.getenv('MMCV_WITH_TRT', '0') != '0':
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(
            os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)

        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
        library_dirs += library_paths(cuda=True)

        from setuptools import Extension
        ext_ops = Extension(name=ext_name,
                            sources=op_files,
                            include_dirs=include_dirs,
                            define_macros=define_macros,
                            extra_compile_args=extra_compile_args,
                            language='c++',
                            library_dirs=library_dirs,
                            libraries=libraries)
        extensions.append(ext_ops)

    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        return extensions

    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        # the new parrots op implementation does not use MMCV_USE_PARROTS
        # define_macros = [('MMCV_USE_PARROTS', None)]
        define_macros = []
        include_dirs = []
        op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') +\
            glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {
            'nvcc': [cuda_args] if cuda_args else [],
            'cxx': [],
        }
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            extra_compile_args['nvcc'] += [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        ext_ops = Extension(name=ext_name,
                            sources=op_files,
                            include_dirs=include_dirs,
                            define_macros=define_macros,
                            extra_compile_args=extra_compile_args,
                            cuda=True,
                            pytorch=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension

        # prevent ninja from using too many resources
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, num_cpu - 1)
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4

        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []

        # Before PyTorch 1.8.0, `cxx` was a required key when compiling
        # CUDA code: even with no flags to pass, users still had to supply
        # an empty list. Since PyTorch 1.8.0 it has a default value, so the
        # empty list is no longer needed.
        # More details at https://github.com/pytorch/pytorch/pull/45956
        extra_compile_args = {'cxx': []}

        # Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
        # c++14 features, '-std=c++14' must be added here. On Windows,
        # however, some standard libraries depend on c++17 or higher, and
        # the compiler picks an appropriate standard for those cpp files
        # on its own, so the flag is not added there.
        if platform.system() != 'Windows':
            extra_compile_args['cxx'] = ['-std=c++14']

        include_dirs = []

        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = True if ((torch.version.hip is not None) and
                                       (ROCM_HOME is not None)) else False
        except ImportError:
            pass

        project_dir = 'mmcv/ops/csrc/'
        if is_rocm_pytorch:
            from torch.utils.hipify import hipify_python

            hipify_python.hipify(
                project_directory=project_dir,
                output_directory=project_dir,
                includes='mmcv/ops/csrc/*',
                show_detailed=True,
                is_pytorch_extension=True,
            )
            define_macros += [('MMCV_WITH_CUDA', None)]
            define_macros += [('HIP_DIFF', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/hip/*') \
                + glob.glob('./mmcv/ops/csrc/pytorch/cpu/hip/*')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
        elif torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))

        # Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
        # c++14 features, '-std=c++14' must be added here. On Windows,
        # however, some standard libraries depend on c++17 or higher, and
        # the compiler picks an appropriate standard for those cpp files
        # on its own, so the flag is not added there.
        if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
            extra_compile_args['nvcc'] += ['-std=c++14']

        ext_ops = extension(name=ext_name,
                            sources=op_files,
                            include_dirs=include_dirs,
                            define_macros=define_macros,
                            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)

    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
        ext_name = 'mmcv._ext_ort'
        from torch.utils.cpp_extension import library_paths, include_paths
        import onnxruntime
        library_dirs = []
        libraries = []
        include_dirs = []
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}

        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))

        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
                                                          '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)

        from setuptools import Extension
        ext_ops = Extension(name=ext_name,
                            sources=op_files,
                            include_dirs=include_dirs,
                            define_macros=define_macros,
                            extra_compile_args=extra_compile_args,
                            language='c++',
                            library_dirs=library_dirs,
                            libraries=libraries)
        extensions.append(ext_ops)

    return extensions
Example #20
from setuptools import setup, Extension
import sys
import os
from torch.utils import cpp_extension

if __name__ == '__main__':
    if sys.platform == "darwin":
        args = ["-DAPPLE"]
    else:
        args = ["-fopenmp"]
    if os.environ.get("DEBUG_TRANSDUCER") == "1":
        args.append("-DDEBUG")
        args.append("-O0")

    ext = Extension(name='transducer_cpp',
                    sources=['transducer.cpp'],
                    include_dirs=cpp_extension.include_paths(),
                    extra_compile_args=args,
                    language='c++')

    setup(name='transducer_cpp',
          ext_modules=[ext],
          cmdclass={'build_ext': cpp_extension.BuildExtension})
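
A usage sketch: after building in place, the compiled module is importable directly from the source directory:

# Build first with: python setup.py build_ext --inplace
import transducer_cpp
print(transducer_cpp.__file__)  # e.g. transducer_cpp.cpython-*.so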
Example #21
File: setup.py Project: sadjad/nnfc
            '\s*\#define\s*{def_name}\s*\"((\d+\.)*\d+?)"[ \t]*$'.format(
                def_name=definition_name), headerfile, re.MULTILINE).group(1)
        return version_found


VERSION = get_def('../config.h', 'VERSION')
EXTENSION_NAME = 'nnfc._ext.nnfc_codec'
#CUDA_AVAILABLE = torch.cuda.is_available()
CUDA_AVAILABLE = False  # TODO(jremmons) add back support for CUDA functions

cuda_sources = [
    'nnfc/src/nnfc_cuda.cc',
] if CUDA_AVAILABLE else []

pytorch_include = []
for lib in include_paths(cuda=CUDA_AVAILABLE):
    # HACK newer versions of cstdlib use #include_next<stdlib.h> which will break
    # if we add /usr/include here.
    if lib != "/usr/include":
        pytorch_include += ['-isystem', lib]

print(pytorch_include)

pytorch_libdirs = library_paths(cuda=CUDA_AVAILABLE)
pytorch_libs = ['cudart'] if CUDA_AVAILABLE else []
pytorch_defines = [('_NNFC_CUDA_AVAILABLE', 1)] if CUDA_AVAILABLE else []

module = Extension(
    EXTENSION_NAME,
    sources=[
        'nnfc/src/nnfc_codec.cc', 'nnfc/src/nnfc_encoder.cc',
Example #22
File: setup.py Project: yyaya/mvfst-rl
if not PREFIX:
    PREFIX = "/usr/local"

extra_compile_args = []
extra_link_args = []

protoc = f"{PREFIX}/bin/protoc"

grpc_objects = [
    f"{PREFIX}/lib/libgrpc++.a",
    f"{PREFIX}/lib/libgrpc.a",
    f"{PREFIX}/lib/libgpr.a",
    f"{PREFIX}/lib/libaddress_sorting.a",
]

include_dirs = cpp_extension.include_paths() + [f"{PREFIX}/include"]
libraries = []

if sys.platform == "darwin":
    extra_compile_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
    extra_link_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]

    # Relevant only when c-ares is not embedded in grpc, e.g. when
    # installing grpc via homebrew.
    libraries.append("cares")
elif sys.platform == "linux":
    libraries.append("z")

grpc_objects.append(f"{PREFIX}/lib/libprotobuf.a")

actorpool = cpp_extension.CppExtension(
Example #23
    else:
        return s


if torch.__version__.startswith("1.0"):
    pytorch_base_include_path_suffix = osp.sep + osp.join("lib", "include")
elif torch.__version__.startswith("1.1"):
    pytorch_base_include_path_suffix = osp.sep + osp.join("include")
elif torch.__version__.startswith("1.2"):
    pytorch_base_include_path_suffix = osp.sep + osp.join("include")
else:
    raise ValueError("unsupported PyTorch version {}".format(
        torch.__version__))

pytorch_install_prefix = None
for current_include_path in cpp_extension.include_paths(False):
    if ("torch" + pytorch_base_include_path_suffix) in current_include_path:
        offset_of_search_string = current_include_path.rfind(
            "torch" + pytorch_base_include_path_suffix)
        current_include_path = current_include_path[:offset_of_search_string +
                                                    len("torch")]
        if osp.isdir(osp.join(current_include_path, "share")):
            pytorch_install_prefix = current_include_path
            break

if pytorch_install_prefix is None:
    raise ValueError("could not determine pytorch install prefix")

cmake_args = [
    "-DPYTORCH_INCLUDES:PATH=" +
    osp.pathsep.join(cpp_extension.include_paths(False)),