Example #1
0
# Build script: compiles the topologylayer C++ (cohomology) extension.

from setuptools import setup
import torch, os
from torch.utils.cpp_extension import CppExtension, BuildExtension

# Absolute path of this script; headers live next to the C++ sources.
this_dir = os.path.dirname(os.path.realpath(__file__))
include_dir = this_dir + '/topologylayer/functional/cohom_cpp/'
extra = {'cxx': ['-std=c++11']}  #, '-D_GLIBCXX_USE_CXX11_ABI=1'

# All translation units of the cohom_cpp extension, relative to the repo root.
_cpp_root = 'topologylayer/functional/cohom_cpp/'
_sources = [_cpp_root + fname
            for fname in ('pybind.cpp', 'cohom.cpp', 'complex.cpp',
                          'cocycle.cpp')]

setup(
    name='topologylayer',
    packages=['topologylayer', 'topologylayer.functional',
              'topologylayer.nn', 'topologylayer.util'],
    ext_modules=[
        CppExtension('topologylayer.functional.cohom_cpp', _sources,
                     include_dirs=[include_dir],
                     extra_compile_args=extra['cxx']),
    ],
    cmdclass={'build_ext': BuildExtension},
    zip_safe=False,
)
示例#2
0
    except:
        pass
else:
    warnings.warn(
        "Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, "
                           "found torch.__version__ = {}".format(
                               torch.__version__))

if "--cpp_ext" in sys.argv:
    sys.argv.remove("--cpp_ext")
    ext_modules.append(CppExtension('apex_C', [
        'csrc/flatten_unflatten.cpp',
    ]))


def get_cuda_bare_metal_version(cuda_dir):
    """Run ``<cuda_dir>/bin/nvcc -V`` and parse the toolkit version.

    Returns a tuple ``(raw_output, major, minor)`` where *raw_output* is
    the full nvcc banner and *major*/*minor* are version components as
    strings (e.g. ``"10"``, ``"1"``).
    """
    banner = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                     universal_newlines=True)
    tokens = banner.split()
    # The version number is the token right after the literal word
    # "release", e.g. "... release 10.1, V10.1.243".
    version_token = tokens[tokens.index("release") + 1]
    parts = version_token.split(".")
    # Only the first digit of the minor component is kept (trailing
    # punctuation like "1," is dropped), matching the original parsing.
    return banner, parts[0], parts[1][0]

示例#3
0
setup(
    name=os.environ.get('TORCH_XLA_PACKAGE_NAME', 'torch_xla'),
    version=version,
    description='XLA bridge for PyTorch',
    url='https://github.com/pytorch/xla',
    author='PyTorch/XLA Dev Team',
    author_email='*****@*****.**',
    # Exclude the build files.
    packages=find_packages(exclude=['build']),
    ext_modules=[
        CppExtension(
            '_XLAC',
            torch_xla_sources,
            include_dirs=include_dirs,
            extra_compile_args=extra_compile_args,
            library_dirs=library_dirs,
            extra_link_args=extra_link_args + \
                [make_relative_rpath('torch_xla/lib')],
        ),
    ],
    package_data={
        'torch_xla': [
            'lib/*.so*',
        ],
    },
    data_files=[
        'test/cpp/build/test_ptxla',
        'scripts/fixup_binary.py',
    ],
    cmdclass={
示例#4
0
File: setup.py  Project: lhz710818/apex
        pass
else:
    warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, "
                           "found torch.__version__ = {}".format(torch.__version__))
    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

if "--cpp_ext" in sys.argv:
    from torch.utils.cpp_extension import CppExtension
    sys.argv.remove("--cpp_ext")
    ext_modules.append(
        CppExtension('apex_C',
                     ['csrc/flatten_unflatten.cpp',]))

def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
示例#5
0
extra_compile_args += ["--expt-relaxed-constexpr"]

nvcc_extra_args = [
    "--expt-relaxed-constexpr",
    "-O2",
    "--gpu-architecture=sm_61",
    "-lineinfo",
    f"-I/home/{getuser()}/libs/cub-1.8.0",
]

setup(
    name="bfpactivation_cpu",
    ext_modules=[
        CppExtension(
            "bfpactivation_cpu",
            sources=["bfpactivation_cpu.cpp"],
            extra_compile_args=flags,
        )
    ],
    extra_compile_args=extra_compile_args,
    cmdclass={"build_ext": BuildExtension},
)

setup(
    name="bfpactivation_cuda",
    ext_modules=[
        CUDAExtension(
            "bfpactivation_cuda",
            sources=["bfpactivation_cuda.cpp", "bfpactivation_cuda_kernel.cu"],
            extra_compile_args={
                "cxx": flags,
示例#6
0
    cudaconfig = {
        'home': home,
        'nvcc': nvcc,
        'include': pjoin(home, 'include'),
        'lib64': pjoin(home, 'lib64')
    }
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError(
                'The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig


# Resolve the CUDA installation (locate_cuda is defined earlier in this
# file and raises EnvironmentError when paths are missing).
# NOTE(review): CUDA is looked up but not referenced by the setup() call
# below — presumably kept for its validation side effect; confirm.
CUDA = locate_cuda()
setup(
    name='torch_autograd_solver',
    ext_modules=[
        # ATen-based C++ extension; OpenMP enabled for parallel solves.
        CppExtension('torch_autograd_solver_aten',
                     ['torch_autograd_solver.cpp'],
                     extra_compile_args=["-fopenmp"]
                     # extra_link_args=["-D_GLIBCXX_USE_CXX11_ABI=1"]
                     )
    ],
    cmdclass={'build_ext': BuildExtension},
    packages=["torch_autograd_solver"],
    classifiers=[
        'Programming Language :: Python :: 3',
    ],
)
示例#7
0
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
import os

# If CUDA_HOME is exported (e.g. export CUDA_HOME=/usr/local/cuda/),
# add its include directory to the header search path; otherwise build
# with no extra include dirs.
conda = os.getenv("CUDA_HOME")
inc = [conda + "/include"] if conda else []

libname = "torch_batch_svd"

setup(
    name=libname,
    ext_modules=[
        CppExtension(
            libname,
            [libname + '.cpp'],
            include_dirs=inc,
            # Links against cuSOLVER/cuBLAS for the batched SVD kernels.
            libraries=["cusolver", "cublas"],
            extra_compile_args={'cxx': ['-g', '-DDEBUG'],
                                'nvcc': ['-O2']},
        ),
    ],
    cmdclass={'build_ext': BuildExtension},
)
示例#8
0
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME

ext_modules = [
    CppExtension('sym3eig_cpu', ['cpu/sym3eig.cpp']),
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}

if CUDA_HOME is not None:
    ext_modules += [
        CUDAExtension('sym3eig_cuda',
                      ['cuda/sym3eig.cpp', 'cuda/sym3eig_kernel.cu'])
    ]

__version__ = '1.0.0'
#url = 'https://github.com/mrjel/pytorch_sym3eig'

install_requires = ['torchvision']
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov', 'numpy']

setup(
    name='torch_sym3eig',
    version=__version__,
    description=
    'Implementation of batch-wise eigenvector/value computation for symmetric 3x3 matrices'
    'Batchwise symmetric 3x3 eigencomputation in PyTorch',
    author='Jan Eric Lenssen',
    author_email='*****@*****.**',
    #url=url,
示例#9
0
# Use .get so a missing CONDA_PREFIX yields '' instead of raising
# KeyError — the emptiness check right below already expects that case.
conda_env = os.environ.get('CONDA_PREFIX', '')

# opencv_inc_dir / opencv_lib_dir are defined earlier in this file; when
# both are unset, fall back to the active conda environment's layout.
if conda_env and not opencv_inc_dir and not opencv_lib_dir:
    print("Detected active conda environment:", conda_env)

    opencv_inc_dir = conda_env + '/include'
    opencv_lib_dir = conda_env + '/lib'

    print("Assuming OpenCV dependencies in:")
    print(opencv_inc_dir)
    print(opencv_lib_dir)

# Both directories are mandatory; bail out with an instruction otherwise.
if not opencv_inc_dir:
    print("Error: You have to provide an OpenCV include directory. Edit this file.")
    exit()
if not opencv_lib_dir:
    print("Error: You have to provide an OpenCV library directory. Edit this file.")
    exit()

setup(
    name='ngransac',
    ext_modules=[CppExtension(
        name='ngransac',
        sources=['ngransac.cpp', 'thread_rand.cpp'],
        include_dirs=[opencv_inc_dir],
        library_dirs=[opencv_lib_dir],
        # Needs OpenCV core + calib3d; OpenMP for the parallel sampler.
        libraries=['opencv_core', 'opencv_calib3d'],
        extra_compile_args=['-fopenmp'],
    )],
    cmdclass={'build_ext': BuildExtension})
示例#10
0
from setuptools import setup
import torch
from torch.utils.cpp_extension import CppExtension

# Debug symbols for the compiled extension (kept for parity, unused below).
extra_compile_args = ['-g']

# Single CPU extension exposing the grid kernels.
ext_modules = [CppExtension('pygrid.grid_cpp', ['src/grid.cpp'])]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}

__version__ = '1.0.0'

install_requires = ['scipy']
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']

setup(name='torch_extension',
      version=__version__,
      description='PyTorch Extension',
      install_requires=install_requires,
      setup_requires=setup_requires,
      tests_require=tests_require,
      ext_modules=ext_modules,
      cmdclass=cmdclass)
from setuptools import find_packages, setup
from Cython.Build import cythonize
import torch
import numpy
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
                                       CUDAExtension)

# CPU-only NMS extension (the CUDA variant was disabled upstream).
_cpu_nms_ext = CppExtension(name='cpu_nms',
                            sources=['cpu_nms.cpp'],
                            extra_compile_args={'cxx': []})

setup(
    name="CAP",
    ext_modules=[_cpu_nms_ext],
    cmdclass={'build_ext': BuildExtension},
    # NumPy headers are needed by the NMS sources.
    include_dirs=[numpy.get_include()],
)
示例#12
0
File: setup.py  Project: Aelphy/gesvd
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

# Single C++ extension wrapping LAPACK's GESVD, linked via OpenBLAS.
setup(
    name='gesvd',
    version="0.1",
    ext_modules=[
        CppExtension('gesvd_cpp', ['gesvd.cpp'],
                     extra_link_args=['-lopenblas']),
    ],
    scripts=['gesvd.py', '__init__.py'],
    cmdclass={'build_ext': BuildExtension},
)
示例#13
0
import sys
import torch.cuda
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME

# MSVC hardening flags on Windows; debug info plus -Werror elsewhere so
# any new warning fails the build.
CXX_FLAGS = ['/sdl', '/permissive-'] if sys.platform == 'win32' else ['-g', '-Werror']

# CPU-only test extensions, always built.
ext_modules = [
    CppExtension(
        'torch_test_cpp_extension.cpp', ['extension.cpp'],
        extra_compile_args=CXX_FLAGS),
    CppExtension(
        'torch_test_cpp_extension.msnpu', ['msnpu_extension.cpp'],
        extra_compile_args=CXX_FLAGS),
]

# Build the CUDA test extension only when both a CUDA device and a
# toolkit installation (CUDA_HOME) are present.
if torch.cuda.is_available() and CUDA_HOME is not None:
    extension = CUDAExtension(
        'torch_test_cpp_extension.cuda', [
            'cuda_extension.cpp',
            'cuda_extension_kernel.cu',
            'cuda_extension_kernel2.cu',
        ],
        extra_compile_args={'cxx': CXX_FLAGS,
                            'nvcc': ['-O2']})
    ext_modules.append(extension)

setup(
    name='torch_test_cpp_extension',
    packages=['torch_test_cpp_extension'],
示例#14
0
File: setup.py  Project: limuhit/CCN
#!/usr/bin/env python3
"""Build script for the 'coder' arithmetic-coding C++ extension.

The original flag list mixed GCC syntax (-std=c++11) with MSVC syntax
(/wd4251, /W0, /O1) in a single list; each compiler rejects the other
family's flags, so the set is now chosen per platform.
"""
import sys

from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension

if sys.platform == 'win32':
    # MSVC: silence C4251 (dll-interface warning), lowest warning level,
    # optimize for size.
    cxx_args = ['/wd4251', '/W0', '/O1']
else:
    cxx_args = ['-std=c++11']

setup(
    name='coder',
    ext_modules=[
        CppExtension('coder',
                     ['python.cpp', 'BitIoStream.cpp', 'ArithmeticCoder.cpp'],
                     extra_compile_args={'cxx': cxx_args})
    ],
    cmdclass={'build_ext': BuildExtension})
示例#15
0
        'fairseq/models/fused_relu_dropout/fused_relu_dropout_cuda.cpp',
        'fairseq/models/fused_relu_dropout/fused_relu_dropout_cuda_kernel.cu'
    ],
    extra_compile_args={
        'cxx': [
            '-O2',
        ],
        'nvcc': [
            '--gpu-architecture=sm_70', '-O3', '--use_fast_math',
            '--expt-extended-lambda'
        ],
    })
# Batch-construction helpers compiled as C++ extensions; each variant
# builds the make_batches kernel at a different API revision.

def _make_batch_ext(suffix, source_file, cxx_flags):
    # Factory to avoid repeating the CppExtension boilerplate.
    return CppExtension(
        name='fairseq.data.batch_C_' + suffix,
        sources=['fairseq/data/csrc/' + source_file],
        extra_compile_args={'cxx': cxx_flags})

batch_utils_v0p5 = _make_batch_ext(
    'v0p5', 'make_batches_v0p5.cpp', ['-O2'])
batch_utils_v0p5_better = _make_batch_ext(
    'v0p5_better', 'make_batches_v0p5_better.cpp', ['-O2', '--std=c++14'])
batch_utils_v0p6 = _make_batch_ext(
    'v0p6', 'make_batches_v0p6.cpp', ['-O2', '--std=c++14'])
示例#16
0
File: setup.py  Project: xsacha/pytorch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME

# Pick compiler flags per platform; VC 14.16 chokes on /permissive-, so
# it only gets /sdl.
if sys.platform == 'win32':
    vc_version = os.getenv('VCToolsVersion', '')
    if vc_version.startswith('14.16.'):
        CXX_FLAGS = ['/sdl']
    else:
        CXX_FLAGS = ['/sdl', '/permissive-']
else:
    CXX_FLAGS = ['-g']

# Opt into the ninja-backed build when USE_NINJA=1 is exported.
USE_NINJA = os.getenv('USE_NINJA') == '1'

# CPU-only test extensions, always built.
ext_modules = [
    CppExtension('torch_test_cpp_extension.cpp', ['extension.cpp'],
                 extra_compile_args=CXX_FLAGS),
    CppExtension('torch_test_cpp_extension.ort', ['ort_extension.cpp'],
                 extra_compile_args=CXX_FLAGS),
    CppExtension('torch_test_cpp_extension.rng', ['rng_extension.cpp'],
                 extra_compile_args=CXX_FLAGS),
]

if torch.cuda.is_available() and (CUDA_HOME is not None
                                  or ROCM_HOME is not None):
    extension = CUDAExtension('torch_test_cpp_extension.cuda', [
        'cuda_extension.cpp',
        'cuda_extension_kernel.cu',
        'cuda_extension_kernel2.cu',
    ],
                              extra_compile_args={
                                  'cxx': CXX_FLAGS,
示例#17
0
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
import os

# Honour an exported CUDA_HOME, but keep the previously hard-coded
# toolkit location as the fallback so existing builds are unaffected.
conda = os.getenv("CUDA_HOME", "/usr/local/cuda-10.0")
inc = [conda + "/include"]

libname = "cuda_batch_inverse"
setup(name=libname,
      ext_modules=[
          CppExtension(libname, ['cuda_inverse_pytorch.cpp'],
                       include_dirs=inc,
                       # Links cuSOLVER/cuBLAS for the batched inverse.
                       libraries=["cusolver", "cublas"],
                       extra_compile_args={
                           'cxx': [],
                           'nvcc': ['-O2']
                       })
      ],
      cmdclass={'build_ext': BuildExtension})
示例#18
0
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension

from torch_autograd_solver import __version__

# Package metadata plus the single ATen-based extension module.
_solver_ext = CppExtension(
    'torch_autograd_solver_aten',
    ['torch_autograd_solver.cpp'],
    # OpenMP for parallel solves inside the C++ kernel.
    extra_compile_args=["-fopenmp"],
)

setup(
    name='torch_autograd_solver',
    version=__version__,
    description='autograd solver C++ implementation for pytorch',
    url='https://github.com/ShigekiKarita/pytorch-autograd-solver',
    author='Shigeki Karita',
    author_email="*****@*****.**",
    license='BSL-1.0',
    keywords='pytorch',
    ext_modules=[_solver_ext],
    cmdclass={'build_ext': BuildExtension},
    packages=["torch_autograd_solver"],
    classifiers=['Programming Language :: Python :: 3'],
)
示例#19
0
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension

ext_modules = [
    CppExtension('basis_cpu', ['cpu/basis.cpp']),
    CppExtension('weighting_cpu', ['cpu/weighting.cpp']),
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}

if torch.cuda.is_available():
    ext_modules += [
        CUDAExtension('basis_cuda',
                      ['cuda/basis.cpp', 'cuda/basis_kernel.cu']),
        CUDAExtension('weighting_cuda',
                      ['cuda/weighting.cpp', 'cuda/weighting_kernel.cu']),
    ]

__version__ = '1.0.4'
url = 'https://github.com/rusty1s/pytorch_spline_conv'

install_requires = []
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']

setup(
    name='torch_spline_conv',
    version=__version__,
    description='Implementation of the Spline-Based Convolution'
    'Operator of SplineCNN in PyTorch',
    author='Matthias Fey',
示例#20
0
                                "nvcc": ["-U__CUDA_NO_HALF_CONVERSIONS__"]},
            libraries=["nvidia-ml"],
        ) if not cpu_only_build else
        CppExtension(
            name="fbgemm_gpu_py",
            sources=[
                os.path.join(cur_dir, build_codegen_path, "{}".format(f))
                for f in cpp_cpu_output_files
            ]
            + cpp_asmjit_files
            + cpp_fbgemm_files
            + [
                os.path.join(cur_dir, "codegen/embedding_forward_split_cpu.cpp"),
                os.path.join(cur_dir, "codegen/embedding_forward_quantized_host_cpu.cpp"),
                os.path.join(cur_dir, "codegen/embedding_backward_dense_host_cpu.cpp"),
            ],
            include_dirs=[
                cur_dir,
                os.path.join(cur_dir, "include"),
                os.path.join(cur_dir, "../include"),
                os.path.join(cur_dir, "../src"),
                os.path.join(cur_dir, "../third_party/asmjit/src"),
                os.path.join(cur_dir, "../third_party/asmjit/src/core"),
                os.path.join(cur_dir, "../third_party/asmjit/src/x86"),
                os.path.join(cur_dir, "../third_party/cpuinfo/include"),
            ],
            extra_compile_args={"cxx": extra_compile_args},
        )
    ],
    cmdclass={"build_ext": FBGEMM_GPU_BuildExtension},
)
示例#21
0
                'cxx/mcubes.cpp',
                'cxx/mcubes_cpu.cpp',
                'cxx/mcubes_cuda.cu',
                'cxx/grid_interp_cpu.cpp',
                'cxx/grid_interp_cuda.cu'
            ])
        ],
        cmdclass={
            'build_ext': BuildExtension
        }
    )
except:
    print('CUDA environment was not successfully loaded!')
    print('Build only CPU module!')

    from torch.utils.cpp_extension import CppExtension

    setup(
        name='mcubes_module',
        ext_modules=[
            CppExtension('mcubes_module', [
                'cxx/mcubes.cpp',
                'cxx/mcubes_cpu.cpp',
                'cxx/grid_interp_cpu.cpp',
            ])
        ],
        cmdclass={
            'build_ext': BuildExtension
        }
    )
示例#22
0
    compile_args.append('-DHAVE_XZLIB')
    ext_libs.append('lzma')

# Vendored dependencies whose include dirs must be on the search path.
third_party_libs = [
    "kenlm", "openfst-1.6.7/src/include", "ThreadPool", "boost_1_67_0", "utf8"
]
compile_args.extend(['-DINCLUDE_KENLM', '-DKENLM_MAX_ORDER=6'])
# All KenLM + OpenFST translation units, excluding their standalone
# main/test programs which would define duplicate entry points.
lib_sources = glob.glob('third_party/kenlm/util/*.cc') + glob.glob(
    'third_party/kenlm/lm/*.cc') + glob.glob(
        'third_party/kenlm/util/double-conversion/*.cc') + glob.glob(
            'third_party/openfst-1.6.7/src/lib/*.cc')
lib_sources = [
    fn for fn in lib_sources
    if not (fn.endswith('main.cc') or fn.endswith('test.cc'))
]

third_party_includes = [
    os.path.realpath(os.path.join("third_party", lib))
    for lib in third_party_libs
]
ctc_sources = glob.glob('ctcdecode/src/*.cpp')

# NOTE(review): 'package' and 'with_cuda' are not documented CppExtension
# parameters — they appear to be passed through to Extension's **kwargs;
# verify they are still accepted by the targeted torch version.
extension = CppExtension(name='ctcdecode._ext.ctc_decode',
                         package=True,
                         with_cuda=False,
                         sources=ctc_sources + lib_sources,
                         include_dirs=third_party_includes + include_paths(),
                         libraries=ext_libs,
                         extra_compile_args=compile_args,
                         language='c++')
示例#23
0
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension

# Builds one CUDA extension (sigmoid) and one plain C++ extension
# (linear) under a single distribution.
_sigmoid_ext = CUDAExtension('sigmoid_cuda', [
    'sigmoid_cuda.cpp',
    'sigmoid_cuda_kernel.cu',
])
_linear_ext = CppExtension('linear_cpp', ['linear.cpp'])

setup(
    name='sigmoid_cuda_linear_cpp',
    ext_modules=[_sigmoid_ext, _linear_ext],
    cmdclass={'build_ext': BuildExtension},
)
示例#24
0
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension

# FIX: the sources include a .cu kernel and nvcc-specific flags, so the
# module must be built with CUDAExtension — CppExtension would not route
# the .cu file through nvcc and the build would fail.
setup(name='Align1D',
      version="2.2.0",
      author="Frost Mengmeng Xu",
      author_email="*****@*****.**",
      description="A small package for 1d aligment in cuda",
      long_description="I will write a longer description here :)",
      long_description_content_type="text/markdown",
      url="https://github.com/Frostinassiky/G-TAD",
      ext_modules=[
          CUDAExtension(name='Align1D',
                        sources=[
                            'Align1D_cuda.cpp',
                            'Align1D_cuda_kernal.cu',
                        ],
                        extra_compile_args={
                            'cxx': ['-std=c++14', '-fopenmp'],
                            'nvcc': ['--expt-relaxed-constexpr']
                        })
      ],
      cmdclass={'build_ext': BuildExtension})
示例#25
0
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension

ext_modules = [
    CppExtension('graclus_cpu', ['cpu/graclus.cpp']),
    CppExtension('grid_cpu', ['cpu/grid.cpp']),
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}

if torch.cuda.is_available():
    ext_modules += [
        CUDAExtension('graclus_cuda',
                      ['cuda/graclus.cpp', 'cuda/graclus_kernel.cu']),
        CUDAExtension('grid_cuda', ['cuda/grid.cpp', 'cuda/grid_kernel.cu']),
        CUDAExtension('fps_cuda', ['cuda/fps.cpp', 'cuda/fps_kernel.cu']),
        CUDAExtension('nearest_cuda',
                      ['cuda/nearest.cpp', 'cuda/nearest_kernel.cu']),
    ]

__version__ = '1.2.0'
url = 'https://github.com/rusty1s/pytorch_cluster'

install_requires = []
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']

setup(
    name='torch_cluster',
    version=__version__,
    description='PyTorch Extension Library of Optimized Graph Cluster '
示例#26
0
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

# Minimal build: a single C++ extension, no CUDA sources.
setup(
    name='horder_cpp',
    ext_modules=[CppExtension('horder_cpp', ['horder.cpp'])],
    cmdclass={'build_ext': BuildExtension},
)
示例#27
0
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME

TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

extra_compile_args = []
if platform.system() != 'Windows':
    extra_compile_args += ['-Wno-unused-variable']

if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    extra_compile_args += ['-DVERSION_GE_1_3']

ext_modules = [
    CppExtension('torch_scatter.scatter_cpu', ['cpu/scatter.cpp'],
                 extra_compile_args=extra_compile_args)
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}

if CUDA_HOME is not None:
    ext_modules += [
        CUDAExtension('torch_scatter.scatter_cuda',
                      ['cuda/scatter.cpp', 'cuda/scatter_kernel.cu'])
    ]

__version__ = '1.3.2'
url = 'https://github.com/rusty1s/pytorch_scatter'

install_requires = []
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']
示例#28
0
    def run(self):
        build_warpctc_so()


class Build(distutils.command.build.build):
    # Prepend a 'build_deps' step so the native warpctc shared library is
    # compiled before the regular build sub-commands run.
    sub_commands = [
        ('build_deps', lambda self: True),
    ] + distutils.command.build.build.sub_commands


setup(
    name='warpctc',
    ext_modules=[
        # apparently pybind does not support submodules like warpctc._warpctc
        CppExtension('warpctc._warpctc', ['src/_warpctc.cpp'],
                     include_dirs=['../include'],
                     library_dirs=['build/lib'],
                     libraries=['warpctc'],
                     # make_relative_rpath is defined earlier in this file;
                     # embeds a relative rpath so the bundled lib is found.
                     extra_link_args=[make_relative_rpath('lib')])
    ],
    packages=find_packages(exclude=['tests']),
    # Ship the prebuilt shared library inside the wheel.
    package_data={'warpctc': ['lib/libwarpctc' + shared_object_ext()]},

    cmdclass={
        'build': Build,
        'build_deps': BuildDeps,
        'build_ext': BuildExtension,
        'clean': Clean,
    })
- Better numerical precision for all matrix and batch sizes

Find more details and the most up-to-date information on the project webpage:
https://www.github.com/toshas/torch-householder
"""

# Package metadata plus the single C++ extension implementing the
# Householder (ORGQR) transformation.  long_description and requirements
# are defined earlier in this file.
setup(
    name='torch_householder',
    version='1.0.0',
    description='Efficient Householder transformation in PyTorch',
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=requirements,
    python_requires='>=3.6',
    packages=find_packages(),
    author='Anton Obukhov',
    license='BSD',
    url='https://www.github.com/toshas/torch-householder',
    ext_modules=[
        CppExtension(
            'torch_householder_cpp',
            [os.path.join('torch_householder', 'householder.cpp')],
        )
    ],
    cmdclass={'build_ext': BuildExtension},
    keywords=[
        'pytorch', 'householder', 'orgqr', 'efficient', 'differentiable',
        'orthogonal', 'transformation', 'unitary', 'matrices'
    ],
)
示例#30
0
# Reject PyTorch versions older than the minimum (parse_version and
# min_version are defined earlier in this file).
current_version = parse_version(torch.__version__)

if current_version < min_version:  # PyTorch before 1.0
    raise NotImplementedError('Only support torch>=1.0.0')

# NOTE(review): this is printed unconditionally, even when the CPU-only
# branch below is taken.
print('Including CUDA code.')

# Directory containing this script, added to the include path so the
# extension sources can find their local headers.
current_dir = os.path.dirname(os.path.realpath(__file__))

# Build the CUDA variant when a GPU runtime is available; otherwise fall
# back to the plain C++ implementation of rod_align.
if torch.cuda.is_available():
    setup(name='rod_align_api',
          ext_modules=[
              CUDAExtension(name='rod_align_api',
                            sources=[
                                'src/rod_align_cuda.cpp',
                                'src/rod_align_kernel.cu'
                            ],
                            include_dirs=[current_dir] +
                            torch.utils.cpp_extension.include_paths(cuda=True))
          ],
          cmdclass={'build_ext': BuildExtension})
else:
    setup(name='rod_align_api',
          ext_modules=[
              CppExtension(name='rod_align_api',
                           sources=['src/rod_align.cpp'],
                           include_dirs=[current_dir] +
                           torch.utils.cpp_extension.include_paths(cuda=False))
          ],
          cmdclass={'build_ext': BuildExtension})