Esempio n. 1
0
def ext_modules():
    """Return the extension list: the CPU module always, the CUDA one only
    when a CUDA toolkit is detected on the build machine."""
    cpu_ext = cpp_extension.CppExtension(
        "torchsort.isotonic_cpu",
        sources=["torchsort/isotonic_cpu.cpp"],
        extra_compile_args=compile_args(),
    )
    modules = [cpu_ext]
    # Only compile the CUDA kernel when nvcc is actually available.
    if cuda_toolkit_available():
        cuda_ext = cpp_extension.CUDAExtension(
            "torchsort.isotonic_cuda",
            sources=["torchsort/isotonic_cuda.cu"],
        )
        modules.append(cuda_ext)
    return modules
Esempio n. 2
0
def get_extensions():
    # This code originally offered non-CUDA compilation... this seemed to be a
    # lie though as crucial methods like "modulated_deform_conv" are currently
    # unimplemented for CPU. Instead, we bite the bullet and make CUDA an
    # explicit requirement for this package. It matches the typical use-case at
    # the end of the day.
    root = os.path.join('fcos', 'core', 'csrc')
    # First top-level .cpp is the dispatcher; the rest are CPU/CUDA kernels.
    main_source = glob(os.path.join(root, '*.cpp'))[0]
    cpu_sources = glob(os.path.join(root, 'cpu', '*.cpp'))
    cuda_sources = glob(os.path.join(root, 'cuda', '*.cu'))
    extension = tcpp.CUDAExtension(
        "fcos.core._C",
        [main_source] + cpu_sources + cuda_sources,
        include_dirs=[root],
        define_macros=[("WITH_CUDA", None)],
        extra_compile_args={
            "cxx": [],
            "nvcc": [
                "-DCUDA_HAS_FP16=1",
                "-D__CUDA_NO_HALF_OPERATORS__",
                "-D__CUDA_NO_HALF_CONVERSIONS__",
                "-D__CUDA_NO_HALF2_OPERATORS__",
            ],
        },
    )
    return [extension]
Esempio n. 3
0
from setuptools import setup
from torch.utils import cpp_extension

# Build the "rle_cuda" module from the C++ binding plus the CUDA kernel.
setup(name='rle',
      description="a package used for compress sparse tensor",
      packages=["rle"],
      package_data={"rle": ["__init__.py"]},
      ext_modules=[
          cpp_extension.CUDAExtension('rle_cuda', [
              'rle_cuda.cpp',
              'rle_cuda_kernel.cu',
          ]),
      ],
      # BuildExtension injects the correct compiler/nvcc flags for PyTorch.
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 4
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# CPU-only LLTM extension (pure C++ implementation).
setup(name='lltm_cpp',
      ext_modules=[
          cpp_extension.CppExtension('lltm_cpp', ['./models/ops/lltm.cpp'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})

# CUDA LLTM extension: C++ binding plus CUDA kernel, built as a second
# distribution via a second setup() call.
setup(name='lltm_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension('lltm_cuda', [
              './models/ops/lltm_cuda.cpp',
              './models/ops/lltm_cuda_kernel.cu',
          ])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 5
0
# Include/library search paths for the UCX and UCC installations.
# NOTE(review): ucc_plugin_dir, ucx_home, ucc_home, plugin_sources and
# plugin_compile_args are presumably defined earlier in this file — confirm.
plugin_include_dirs = [
    "{}/include/".format(ucc_plugin_dir), "{}/include/".format(ucx_home),
    "{}/include/".format(ucc_home)
]
plugin_library_dirs = ["{}/lib/".format(ucx_home), "{}/lib/".format(ucc_home)]
plugin_libraries = ["ucp", "uct", "ucm", "ucs", "ucc"]

# WITH_CUDA env var selects the build flavor: unset or "no" -> CPU-only,
# anything else -> CUDA build with the USE_CUDA macro defined.
with_cuda = os.environ.get("WITH_CUDA")
if with_cuda is None or with_cuda == "no":
    print("CUDA support is disabled")
    module = cpp_extension.CppExtension(name="torch_ucc",
                                        sources=plugin_sources,
                                        include_dirs=plugin_include_dirs,
                                        library_dirs=plugin_library_dirs,
                                        libraries=plugin_libraries,
                                        extra_compile_args=plugin_compile_args)
else:
    print("CUDA support is enabled")
    plugin_compile_args.append("-DUSE_CUDA")
    module = cpp_extension.CUDAExtension(
        name="torch_ucc",
        sources=plugin_sources,
        include_dirs=plugin_include_dirs,
        library_dirs=plugin_library_dirs,
        libraries=plugin_libraries,
        extra_compile_args=plugin_compile_args)
setup(name="torch-ucc",
      version="1.0.0",
      ext_modules=[module],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 6
0
import fileinput
import os
import sys

from setuptools import setup
from torch.utils import cpp_extension

# Sources for the fused-optimizer extension, resolved relative to this file
# so the build works regardless of the current working directory.
filenames = [
    os.path.join(os.path.dirname(__file__), "fused_ops_frontend.cpp"),
    os.path.join(os.path.dirname(__file__), "multi_tensor_adam.cu"),
    os.path.join(os.path.dirname(__file__), "multi_tensor_scale_kernel.cu"),
    os.path.join(os.path.dirname(__file__), "multi_tensor_axpby_kernel.cu"),
]

# BUG FIX: os.environ["ONNXRUNTIME_ROCM_VERSION"] raised KeyError when the
# variable was unset; .get() makes "unset" mean "not ROCm". Any non-empty
# value selects the ROCm build.
use_rocm = bool(os.environ.get("ONNXRUNTIME_ROCM_VERSION"))
extra_compile_args = {"cxx": ["-O3"]}
if not use_rocm:
    # nvcc-specific flags only make sense for the CUDA (non-ROCm) build.
    extra_compile_args.update(
        {"nvcc": ["-lineinfo", "-O3", "--use_fast_math"]})

setup(
    name="fused_ops",
    ext_modules=[
        cpp_extension.CUDAExtension(name="fused_ops",
                                    sources=filenames,
                                    extra_compile_args=extra_compile_args)
    ],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
)
Esempio n. 7
0
                rmtree(path)


# TRTorch Python-binding extension.
# NOTE(review): dir_path and CXX11_ABI are defined earlier in this file.
ext_modules = [
    cpp_extension.CUDAExtension(
        'trtorch._C', [
            'trtorch/csrc/trtorch_py.cpp',
            'trtorch/csrc/tensorrt_backend.cpp',
            'trtorch/csrc/tensorrt_classes.cpp',
            'trtorch/csrc/register_tensorrt_classes.cpp',
        ],
        library_dirs=[(dir_path + '/trtorch/lib/'), "/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu"],
        libraries=["trtorch"],
        include_dirs=[
            # BUG FIX: the path separator was missing here ("dir_path" was
            # concatenated directly with "trtorch/csrc"), yielding a bogus
            # include directory; every sibling entry below uses "/".
            dir_path + "/trtorch/csrc",
            dir_path + "/../",
            dir_path + "/../bazel-TRTorch/external/tensorrt/include",
        ],
        extra_compile_args=[
            "-Wno-deprecated",
            "-Wno-deprecated-declarations",
        ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
        extra_link_args=[
            "-Wno-deprecated", "-Wno-deprecated-declarations", "-Wl,--no-as-needed", "-ltrtorch",
            "-Wl,-rpath,$ORIGIN/lib", "-lpthread", "-ldl", "-lutil", "-lrt", "-lm", "-Xlinker", "-export-dynamic"
        ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
        undef_macros=["NDEBUG"])
]

# Long description for PyPI, taken from the repository README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
Esempio n. 8
0
import os
from setuptools import setup, Extension
from torch.utils import cpp_extension


# library_dirs should point to the libtrtorch.so, include_dirs should point to the dir that include the headers
# 1) download the latest package from https://github.com/NVIDIA/TRTorch/releases/
# 2) Extract the file from downloaded package, we will get the "trtorch" directory
# 3) Set trtorch_path to that directory
# NOTE(review): the placeholder below is intentionally not valid Python; the
# user must replace it with a quoted path string before running this script.
trtorch_path = <PATH TO TRTORCH>

# Converter extension linking against the prebuilt libtrtorch.
ext_modules = [
    cpp_extension.CUDAExtension('elu_converter', ['./csrc/elu_converter.cpp'],
                                library_dirs=[(trtorch_path + "/lib/")],
                                libraries=["trtorch"],
                                include_dirs=[trtorch_path + "/include/trtorch/"])
]

setup(
    name='elu_converter',
    ext_modules=ext_modules,
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
Esempio n. 9
0
# Resolve paths relative to this script's real (symlink-free) location.
base_path = os.path.dirname(os.path.realpath(__file__))
if 'Windows' in platform():
    # On Windows CUDA_PATH is the canonical variable; fall back to it.
    CUDA_HOME = os.environ.get('CUDA_HOME', os.environ.get('CUDA_PATH'))
    extra_args = []
else:
    CUDA_HOME = os.environ.get('CUDA_HOME', '/usr/local/cuda')
    # GCC/Clang-only warning flag; MSVC would reject it, hence the split.
    extra_args = ['-Wno-sign-compare']

# Write the version file so the installed package can report __version__.
with open(f'frameworks/pytorch/_version.py', 'wt') as f:
    f.write(f'__version__ = "{VERSION}"')

# CUDA extension linking against the prebuilt libhaste in the current dir.
extension = cpp_extension.CUDAExtension(
    'haste_pytorch_lib',
    sources=glob('frameworks/pytorch/*.cc'),
    extra_compile_args=extra_args,
    include_dirs=[
        os.path.join(base_path, 'lib'),
        os.path.join(CUDA_HOME, 'include')
    ],
    libraries=['haste'],
    library_dirs=['.'])

setup(name='haste_pytorch',
      version=VERSION,
      description=DESCRIPTION,
      long_description=open('README.md', 'r', encoding='utf-8').read(),
      long_description_content_type='text/markdown',
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      url=URL,
      license=LICENSE,
      keywords='pytorch machine learning rnn lstm gru custom op',
Esempio n. 10
0
                rmtree(path)

# TRTorch Python-binding extension.
# NOTE(review): dir_path is defined earlier in this file.
ext_modules = [
    cpp_extension.CUDAExtension('trtorch._C',
                                ['trtorch/csrc/trtorch_py.cpp'],
                                library_dirs=[
                                    dir_path + '/trtorch/lib/libtrtorch.so',
                                    dir_path + '/trtorch/lib/'
                                ],
                                libraries=[
                                    "trtorch"
                                ],
                                include_dirs=[
                                    dir_path + "/../",
                                    dir_path + "/../bazel-TRTorch/external/tensorrt/include",
                                ],
                                extra_compile_args=[
                                    "-D_GLIBCXX_USE_CXX11_ABI=0",
                                    # NOTE(review): gcc spells this warning
                                    # "-Wno-deprecated-declarations" (plural);
                                    # left as-is pending confirmation.
                                    "-Wno-deprecated-declaration",
                                ],
                                extra_link_args=[
                                    # BUG FIX: a missing comma here fused the
                                    # ABI macro and "-Wl,--no-as-needed" into
                                    # one bogus argument via implicit string
                                    # concatenation.
                                    "-D_GLIBCXX_USE_CXX11_ABI=0",
                                    "-Wl,--no-as-needed",
                                    "-ltrtorch",
                                    "-Wl,-rpath,$ORIGIN/lib"
                                ],
                                undef_macros=["NDEBUG"]
                            )
]

with open("README.md", "r") as fh:
Esempio n. 11
0
import torch
from setuptools import setup
import torch.utils.cpp_extension as cpp

# In any case, include the CPU version
modules = [
    cpp.CppExtension('torchsearchsorted.cpu',
                     ['torchsearchsorted/cpu/searchsorted_cpu_wrapper.cpp'])
]

# if CUDA is available, add the cuda extension
# (torch.cuda.is_available() reflects the build machine, not install targets)
if torch.cuda.is_available():
    modules += [
        cpp.CUDAExtension('torchsearchsorted.cuda', [
            'torchsearchsorted/cuda/searchsorted_cuda_wrapper.cpp',
            'torchsearchsorted/cuda/searchsorted_cuda_kernel.cu'
        ])
    ]

# Now proceed to setup
setup(name='torchsearchsorted',
      version='1.0',
      description='A searchsorted implementation for pytorch',
      keywords='searchsorted',
      author='Antoine Liutkus',
      author_email='*****@*****.**',
      packages=['torchsearchsorted'],
      ext_modules=modules,
      cmdclass={'build_ext': cpp.BuildExtension})
Esempio n. 12
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Only the C++ binding is compiled here; the kernel comes from the prebuilt
# "double_kernel" library named in `libraries`.
setup(name='double_ext',
      ext_modules=[
          cpp_extension.CUDAExtension('double_ext', ['double_ext.cpp'],
                                      libraries=['double_kernel'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 13
0
"""
Created at 07.11.19 19:12
@author: gregor

"""

from setuptools import setup
from torch.utils import cpp_extension

setup(name='sandbox_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'sandbox', ['src/sandbox.cpp', 'src/sandbox_cuda.cu'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 14
0
"""
Created at 07.11.19 19:12
@author: gregor

"""

import os, sys, site
from pathlib import Path

# recognise newly installed packages in path
site.main()

from setuptools import setup
from torch.utils import cpp_extension

dir_ = Path(os.path.dirname(sys.argv[0]))

setup(name='RoIAlign extension 2D',
      ext_modules=[cpp_extension.CUDAExtension('roi_al_extension', [str(dir_/'src/RoIAlign_interface.cpp'),
                                                                    str(dir_/'src/RoIAlign_cuda.cu')])],
      cmdclass={'build_ext': cpp_extension.BuildExtension}
      )

setup(name='RoIAlign extension 3D',
      ext_modules=[cpp_extension.CUDAExtension('roi_al_extension_3d', [str(dir_/'src/RoIAlign_interface_3d.cpp'),
                                                                       str(dir_/'src/RoIAlign_cuda_3d.cu')])],
      cmdclass={'build_ext': cpp_extension.BuildExtension}
      )
Esempio n. 15
0
from setuptools import setup
from torch.utils import cpp_extension
import os
import glob

# CPU extension is always built.
ext_modules = [
    cpp_extension.CppExtension(
        "splatting.cpu",
        ["cpp/splatting.cpp"],
    ),
]

# Probe for a CUDA toolkit by locating cublas_v2.h under /usr/local; if
# found, add the CUDA extension using that header's directory.
cublas_include_paths = glob.glob("/usr/local/**/cublas_v2.h", recursive=True)
if len(cublas_include_paths) > 0:
    ext_modules.append(
        cpp_extension.CUDAExtension(
            "splatting.cuda",
            ["cuda/splatting_cuda.cpp", "cuda/splatting.cu"],
            include_dirs=[os.path.dirname(cublas_include_paths[0])],
        ), )

setup(
    name="splatting",
    ext_modules=ext_modules,
    cmdclass={"build_ext": cpp_extension.BuildExtension},
    install_requires=["torch"],
    extras_require={"dev": ["pytest", "pytest-cov",
                            "pre-commit"]},  # pip install -e '.[dev]'
)
from setuptools import setup, Extension
from torch.utils import cpp_extension

# setup(name='dp_cpp',
#       ext_modules=[cpp_extension.CppExtension('lltm_cpp', ['lltm.cpp'])],
#       cmdclass={'build_ext': cpp_extension.BuildExtension})

# Dynamic-programming CUDA extension: binding plus kernel.
setup(
    name='dp_cuda',
    ext_modules=[
        cpp_extension.CUDAExtension('dp_cuda', [
            'dp_cuda.cpp',
            'dp_cuda_kernel.cu',
        ])
    ],
    cmdclass={
        'build_ext': cpp_extension.BuildExtension
    })
# "No-early-exit" variant, built as a separate distribution.
setup(
    name='dp_ne_cuda',
    ext_modules=[
        cpp_extension.CUDAExtension('dp_ne_cuda', [
            'dp_ne_cuda.cpp',
            'dp_ne_cuda_kernel.cu',
        ])
    ],
    cmdclass={
        'build_ext': cpp_extension.BuildExtension
    })
# setup(
#     name='dp_ne_rank1_cuda',
Esempio n. 17
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# The C++ binding and the CUDA kernel compile into one "smear_cuda" module.
smear_ext = cpp_extension.CUDAExtension(
    'smear_cuda',
    ['smear_cuda.cpp', 'smear_cuda_kernel.cu'],
)

setup(
    name='smear_cuda',
    ext_modules=[smear_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
Esempio n. 18
0
from setuptools import setup, Extension, find_packages
from torch.utils import cpp_extension
'''
    python setup.py install
    usage: import torch first, then import this module
'''

# One CUDA extension per loss/activation op; each module is a single .cu
# kernel (the "_cpp" suffix is just the naming convention used by callers).
setup(name='pytorch_loss',
      ext_modules=[
          cpp_extension.CUDAExtension('focal_cpp', [
              'csrc/focal_kernel.cu',
          ]),
          cpp_extension.CUDAExtension('mish_cpp', ['csrc/mish_kernel.cu']),
          cpp_extension.CUDAExtension('swish_cpp', ['csrc/swish_kernel.cu']),
          cpp_extension.CUDAExtension('soft_dice_cpp',
                                      ['csrc/soft_dice_kernel_v2.cu']),
          cpp_extension.CUDAExtension('lsr_cpp', ['csrc/lsr_kernel.cu']),
          cpp_extension.CUDAExtension('large_margin_cpp',
                                      ['csrc/large_margin_kernel.cu']),
          cpp_extension.CUDAExtension('ohem_cpp',
                                      ['csrc/ohem_label_kernel.cu']),
          cpp_extension.CUDAExtension('one_hot_cpp',
                                      ['csrc/one_hot_kernel.cu']),
          cpp_extension.CUDAExtension('lovasz_softmax_cpp',
                                      ['csrc/lovasz_softmax.cu']),
          cpp_extension.CUDAExtension('taylor_softmax_cpp',
                                      ['csrc/taylor_softmax.cu']),
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension},
      packages=find_packages())
Esempio n. 19
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Build the relative-to-absolute index CUDA op from binding plus kernel.
setup(name='rel_to_abs_index_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'rel_to_abs_index_cuda',
              ['rel_to_abs_index_cuda.cpp', 'rel_to_abs_index_cuda_kernel.cu'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 20
0
from setuptools import setup, Extension, find_packages
from torch.utils import cpp_extension

# The dotted extension name places the compiled module inside the
# quant.cpp_extension subpackage.
setup(
    name='quant',
    ext_modules=[
        cpp_extension.CUDAExtension('quant.cpp_extension.calc_quant_bin',
                                    ['quant/cpp_extension/calc_quant_bin.cc']),
    ],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
    packages=find_packages())
Esempio n. 21
0
from setuptools import setup, Extension
from torch.utils import cpp_extension
import os

# Directory containing this setup script (not used below).
module_path = os.path.dirname(__file__)
# NOTE(review): CUDAExtension already adds torch's include paths, so passing
# cpp_extension.include_paths() explicitly is presumably redundant — confirm.
setup(name='op_cpp',
      ext_modules=[
          cpp_extension.CUDAExtension(
              name="fused",
              sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"],
              include_dirs=cpp_extension.include_paths(),
          ),
          cpp_extension.CUDAExtension(
              name="upfirdn2d",
              sources=["upfirdn2d.cpp", "upfirdn2d_kernel.cu"],
              include_dirs=cpp_extension.include_paths(),
          ),
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 22
0
# Collect all EET sources: CUDA kernels, op wrappers, and pybind11 bindings.
current_dir = os.path.dirname(os.path.abspath(__file__))
cuda_sources = glob.glob(os.path.join(current_dir, 'csrc', 'core', '*.cu'))
cpp_sources = glob.glob(os.path.join(current_dir, 'csrc', 'op', '*.cpp'))
py11_sources = glob.glob(os.path.join(current_dir, 'csrc', 'py11', '*.cpp'))
sources = cuda_sources + cpp_sources + py11_sources

# Torch's CUDA include dirs plus the project's own csrc headers.
cuda_include_paths = cpp_extension.include_paths(cuda=True)
self_include_paths = [os.path.join(current_dir, 'csrc')]
include_paths = cuda_include_paths + self_include_paths

# NOTE(review): __version__ and find_packages are presumably defined/imported
# earlier in this file — confirm.
setup(name='EET',
      version=__version__,
      package_dir={"": "python"},
      packages=find_packages("python"),
      ext_modules=[
          cpp_extension.CUDAExtension(name='EET',
                                      sources=sources,
                                      include_dirs=include_paths,
                                      extra_compile_args={
                                          'cxx': ['-g'],
                                          'nvcc': [
                                              '-U__CUDA_NO_HALF_OPERATORS__',
                                              '-U__CUDA_NO_HALF_CONVERSIONS__',
                                              '-U__CUDA_NO_HALF2_OPERATORS__'
                                          ]
                                      },
                                      define_macros=[('VERSION_INFO',
                                                      __version__)])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 23
0
import torch.cuda
from torch.utils import cpp_extension

# NOTE(review): `setup` is called below but not imported in this snippet;
# presumably `from setuptools import setup` appears elsewhere — confirm.
sourceFiles = ['hingetree.cpp']
extraCflags = ['-O2']
extraCudaFlags = ['-O2']

# When CUDA is available, compile the GPU kernel in and define WITH_CUDA
# so the C++ side dispatches to it.
if torch.cuda.is_available():
    sourceFiles.append('hingetree_gpu.cu')
    extraCflags.append('-DWITH_CUDA=1')

    setup(name='hingetree_cpp',
          ext_modules=[
              cpp_extension.CUDAExtension(name='hingetree_cpp',
                                          sources=sourceFiles,
                                          extra_compile_args={
                                              'cxx': extraCflags,
                                              'nvcc': extraCudaFlags
                                          })
          ],
          cmdclass={'build_ext': cpp_extension.BuildExtension})
else:
    # CPU-only build. NOTE(review): the 'nvcc' entry is presumably ignored
    # here since CppExtension compiles no .cu sources — confirm.
    setup(name='hingetree_cpp',
          ext_modules=[
              cpp_extension.CppExtension(name='hingetree_cpp',
                                         sources=sourceFiles,
                                         extra_compile_args={
                                             'cxx': extraCflags,
                                             'nvcc': extraCudaFlags
                                         })
          ],
          cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 24
0
"""
Created at 07.11.19 19:12
@author: gregor

"""

import os, sys, site
from pathlib import Path

# recognise newly installed packages in path
site.main()

from setuptools import setup
from torch.utils import cpp_extension

dir_ = Path(os.path.dirname(sys.argv[0]))

setup(name='nms_extension',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'nms_extension',
              [str(dir_ / 'src/nms_interface.cpp'),
               str(dir_ / 'src/nms.cu')])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 25
0
import fileinput
import os
import sys

from setuptools import setup
from torch.utils import cpp_extension

# TODO: Implement a cleaner way to auto-generate torch_gpu_allocator.cc
# BUG FIX: os.environ["ONNXRUNTIME_ROCM_VERSION"] raised KeyError when the
# variable was unset; .get() makes "unset" select the CUDA build. Any
# non-empty value selects ROCm.
use_rocm = bool(os.environ.get("ONNXRUNTIME_ROCM_VERSION"))
gpu_identifier = "hip" if use_rocm else "cuda"
gpu_allocator_header = "HIPCachingAllocator" if use_rocm else "CUDACachingAllocator"
filename = os.path.join(os.path.dirname(__file__), "torch_gpu_allocator.cc")
# Rewrite the template placeholders in the source file for the chosen
# backend; with inplace=True, stdout is redirected back into the file.
with fileinput.FileInput(filename, inplace=True) as file:
    for line in file:
        if "___gpu_identifier___" in line:
            line = line.replace("___gpu_identifier___", gpu_identifier)
        if "___gpu_allocator_header___" in line:
            line = line.replace("___gpu_allocator_header___",
                                gpu_allocator_header)
        sys.stdout.write(line)

setup(
    name="torch_gpu_allocator",
    ext_modules=[
        cpp_extension.CUDAExtension(name="torch_gpu_allocator",
                                    sources=[filename])
    ],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
)
Esempio n. 26
0
'''
 File Created: Mon Mar 02 2020
 Author: Peng YUN ([email protected])
 Copyright 2018-2020 Peng YUN, RAM-Lab, HKUST
'''

from setuptools import setup, Extension
from torch.utils import cpp_extension

# Four independent distributions, one setup() call each: CPU IoU, CPU box
# ops, then their CUDA counterparts.
setup(name='iou_cpp',
      ext_modules=[cpp_extension.CppExtension('iou_cpp', ['iou.cpp'])],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
setup(name='boxop_cpp',
      ext_modules=[cpp_extension.CppExtension('boxop_cpp', ['boxop.cpp'])],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
setup(name='iou_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension('iou_cuda',
                                      ['iou_cuda.cpp', 'iou_cuda_kernel.cu'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
setup(name='boxop_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'boxop_cuda', ['boxop_cuda.cpp', 'boxop_cuda_kernel.cu'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
from setuptools import setup, Extension
from torch.utils import cpp_extension

# setup(name='dp_cpp',
#       ext_modules=[cpp_extension.CppExtension('lltm_cpp', ['lltm.cpp'])],
#       cmdclass={'build_ext': cpp_extension.BuildExtension})

# In-place activated batch-norm: shared binding, CPU implementation, and
# CUDA kernel in one module. --expt-extended-lambda lets nvcc compile the
# extended device lambdas the kernel uses.
setup(name='inplace_abn',
      ext_modules=[
          cpp_extension.CUDAExtension('inplace_abn', [
              'inplace_abn.cpp',
              'inplace_abn_cpu.cpp',
              'inplace_abn_cuda.cu',
          ],
                                      extra_compile_args={
                                          'nvcc': ["--expt-extended-lambda"],
                                          'cxx': ["-O3"]
                                      })
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 28
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Build the association-calculation CUDA op from binding plus kernel.
setup(name='calc_assoc_cuda',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'calc_assoc_cuda',
              ['calc_assoc_cuda.cpp', 'calc_assoc_cuda_kernel.cu'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 29
0
from setuptools import setup, Extension
from torch.utils import cpp_extension
'''
    python setup.py install
    usage: import torch first, then import this module
'''

# One CUDA extension per loss/activation op (the "_cpp" suffix is just the
# naming convention used by callers).
setup(name='pytorch_loss',
      ext_modules=[
          cpp_extension.CUDAExtension(
              'focal_cpp', ['csrc/focal.cpp', 'csrc/focal_kernel.cu']),
          cpp_extension.CUDAExtension('mish_cpp', ['csrc/mish_kernel.cu']),
          cpp_extension.CUDAExtension('swish_cpp', ['csrc/swish_kernel.cu']),
          cpp_extension.CUDAExtension('soft_dice_cpp',
                                      ['csrc/soft_dice_kernel.cu']),
          cpp_extension.CUDAExtension('lsr_cpp', ['csrc/lsr_kernel_v2.cu']),
          cpp_extension.CUDAExtension('large_margin_cpp',
                                      ['csrc/large_margin_kernel.cu']),
          cpp_extension.CUDAExtension('ohem_cpp',
                                      ['csrc/ohem_label_kernel.cu']),
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
Esempio n. 30
0
                      "Removing them to avoid compilation problems.")
                os.environ[FLAG] = re.sub(r' -std=[^ ]*', '', os.environ[FLAG])

from setuptools import setup
import torch
from torch.utils import cpp_extension
import glob

# CPU extension is always built.
ext_modules = [
    cpp_extension.CppExtension(
        "splatting.cpu",
        ["cpp/splatting.cpp"],
    ),
]

# Add the CUDA extension when the local torch build has CUDA support.
if torch.cuda.is_available():
    ext_modules.append(
        cpp_extension.CUDAExtension(
            "splatting.cuda",
            ["cuda/splatting_cuda.cpp", "cuda/splatting.cu"],
        ), )

setup(
    name="splatting",
    ext_modules=ext_modules,
    cmdclass={"build_ext": cpp_extension.BuildExtension},
    packages=["splatting"],
    install_requires=["torch"],
    extras_require={"dev": ["pytest", "pytest-cov", "pre-commit"]},
)