示例#1
0
def create_extension():
    """Build the ``kungfu_torch_ops`` CppExtension.

    Collects the common C++ sources, then — when torch reports that CUDA is
    available — adds the CUDA sources, headers, and link settings.

    Returns:
        A ``torch.utils.cpp_extension.CppExtension`` instance.
    """
    srcs = []
    srcs += glob.glob('srcs/cpp/src/torch/common.cpp')
    srcs += glob.glob('srcs/cpp/src/torch/ops/cpu/*.cpp')

    include_dirs = [
        # FIXME: use tmp dir of pip
        os.path.join(os.path.dirname(__file__), './srcs/cpp/include')
    ]
    library_dirs = [kungfu_library_dir()]
    libraries = ['kungfu', 'kungfu_python']

    with_cuda = None
    # Local import: torch may not be importable when this module is loaded.
    import torch
    if torch.cuda.is_available():
        srcs += glob.glob('srcs/cpp/src/cuda/*.cpp')
        srcs += glob.glob('srcs/cpp/src/torch/ops/cuda/*.cpp')
        with_cuda = True
        cuda_home = find_cuda()  # hoisted: probe for the CUDA root only once
        include_dirs += [os.path.join(cuda_home, 'include')]
        library_dirs += [os.path.join(cuda_home, 'lib64')]
        libraries += ['cudart']
        srcs += ['srcs/cpp/src/torch/module_cuda.cpp']
    else:
        srcs += ['srcs/cpp/src/torch/module_cpu.cpp']

    return cpp_extension.CppExtension(
        'kungfu_torch_ops',
        srcs,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        libraries=libraries,
        with_cuda=with_cuda,
    )
示例#2
0
def main() -> None:
    """Configure setuptools for the spatially-varying filtering package."""
    pkg_name = 'pysrwarp'
    cuda_stem = 'svf_cuda'
    kernel_dir = 'cuda'

    # The extension links against three pre-built kernel libraries
    # located in ./cuda.
    kernel_libs = [
        cuda_stem + '_kernel',
        cuda_stem + '_half_kernel',
        cuda_stem + '_projective_grid_kernel',
    ]
    warp_ext = cpp_extension.CppExtension(
        name='srwarp_cuda',
        sources=[path.join('cuda', cuda_stem + '.cpp')],
        libraries=kernel_libs,
        library_dirs=[path.join('.', kernel_dir)],
        extra_compile_args=['-g', '-fPIC'],
    )

    setuptools.setup(
        name=pkg_name,
        version='1.0.0',
        author='Sanghyun Son',
        author_email='*****@*****.**',
        packages=setuptools.find_packages(),
        ext_modules=[warp_ext],
        cmdclass={'build_ext': cpp_extension.BuildExtension},
    )
示例#3
0
def main():
    """Assemble the ``libtorchbeast._C`` extension and run setuptools."""
    platform_compile_args = []
    platform_link_args = []

    # Static gRPC archives linked directly into the extension.
    grpc_objects = [
        f"{PREFIX}/lib/libgrpc++.a",
        f"{PREFIX}/lib/libgrpc.a",
        f"{PREFIX}/lib/libgpr.a",
        f"{PREFIX}/lib/libaddress_sorting.a",
    ]

    include_dirs = cpp_extension.include_paths() + [
        np.get_include(),
        f"{PREFIX}/include",
    ]
    libraries = []

    if sys.platform == "darwin":
        darwin_flags = ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
        platform_compile_args += darwin_flags
        platform_link_args += darwin_flags

        # Relevant only when c-cares is not embedded in grpc, e.g. when
        # installing grpc via homebrew.
        libraries.append("cares")
    elif sys.platform == "linux":
        libraries.append("z")

    grpc_objects.append(f"{PREFIX}/lib/libprotobuf.a")

    libtorchbeast = cpp_extension.CppExtension(
        name="libtorchbeast._C",
        sources=[
            "libtorchbeast/libtorchbeast.cc",
            "libtorchbeast/actorpool.cc",
            "libtorchbeast/rpcenv.cc",
            "libtorchbeast/rpcenv.pb.cc",
            "libtorchbeast/rpcenv.grpc.pb.cc",
        ],
        include_dirs=include_dirs,
        libraries=libraries,
        language="c++",
        extra_compile_args=["-std=c++17"] + platform_compile_args,
        extra_link_args=platform_link_args,
        extra_objects=grpc_objects,
    )

    setuptools.setup(
        name="libtorchbeast",
        packages=["libtorchbeast"],
        version="0.0.14",
        ext_modules=[libtorchbeast],
        cmdclass={"build_ext": build_ext},
        test_suite="setup.test_suite",
        install_requires=[
            'setuptools'
        ],  # HACK: any package is ok, but somehow must not be empty
    )
示例#4
0
def ext_modules():
    """Return torchsort's extension list; CUDA variant only when a toolkit is found."""
    exts = [
        cpp_extension.CppExtension(
            "torchsort.isotonic_cpu",
            sources=["torchsort/isotonic_cpu.cpp"],
            extra_compile_args=compile_args(),
        ),
    ]
    # Guard clause: CPU-only build when no CUDA toolkit is detected.
    if not cuda_toolkit_available():
        return exts
    exts.append(
        cpp_extension.CUDAExtension(
            "torchsort.isotonic_cuda",
            sources=["torchsort/isotonic_cuda.cu"],
        ))
    return exts
示例#5
0
def build_cpu_extension(name, src_files=None):
    """Create a CPU CppExtension named ``{name}._cpp``.

    Sources are taken from ``src/<name parts>/src`` and headers from the
    sibling ``include`` directory.  When *src_files* is None, every ``.cpp``
    file found in the source directory is compiled.
    """
    parts = name.split('.')
    base_path = os.path.join("src", *parts)
    src_path = os.path.join(base_path, "src")
    incl_path = os.path.join(base_path, "include")

    # Shallow copy so the shared default-args dict is never mutated.
    ext_args = dict(default_extension_args_cpu)

    if src_files is None:
        src_files = [f for f in os.listdir(src_path) if f.endswith(".cpp")]

    ext_args["sources"] = [os.path.join(src_path, f) for f in src_files]
    ext_args["include_dirs"] = [incl_path]

    return cpp_extension.CppExtension(f"{name}._cpp", **ext_args)
示例#6
0
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]

cmdclass = {}

try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension

    extensions.extend([
        cpp_extension.CppExtension(
            "fairseq.libnat",
            sources=[
                "fairseq/clib/libnat/edit_dist.cpp",
            ],
        )
    ])
    if "CUDA_HOME" in os.environ:
        extensions.extend([
            cpp_extension.CppExtension(
                "fairseq.libnat_cuda",
                sources=[
                    "fairseq/clib/libnat_cuda/edit_dist.cu",
                    "fairseq/clib/libnat_cuda/binding.cpp",
                ],
            ),
            cpp_extension.CppExtension(
                "fairseq.ngram_repeat_block_cuda",
                sources=[
示例#7
0
extension_sources = [str(p) for p in this_dir.joinpath("csrc").rglob("*.cpp")]

# Npcomp bits.
include_dirs = npcomp_build.get_include_dirs()
lib_dirs = npcomp_build.get_lib_dirs()
npcomp_libs = [npcomp_build.get_capi_link_library_name()]
# TODO: Export this in some way from an npcomp config file include vs needing
# it loose here.
compile_args = ["-DMLIR_PYTHON_PACKAGE_PREFIX=npcomp."]

setup(
    name="npcomp-torch",
    ext_modules=[
        cpp_extension.CppExtension(name="_torch_mlir",
                                   sources=extension_sources,
                                   include_dirs=include_dirs,
                                   library_dirs=lib_dirs,
                                   libraries=npcomp_libs,
                                   extra_compile_args=compile_args),
    ],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
    package_dir={
        "": "./python",
    },
    packages=find_packages("./python",
                           include=[
                               "torch_mlir",
                               "torch_mlir.*",
                               "torch_mlir_torchscript",
                               "torch_mlir_torchscript.*",
                               "torch_mlir_torchscript_e2e_test_configs",
                               "torch_mlir_torchscript_e2e_test_configs.*",
示例#8
0
# Build with
#   CXX=c++ python3 setup.py build develop

import setuptools
import sys

from torch.utils import cpp_extension

# macOS needs libc++ explicitly and a minimum deployment target; other
# platforms get no extra flags.
_on_darwin = sys.platform == 'darwin'
extra_compile_args = ['-stdlib=libc++', '-mmacosx-version-min=10.12'] if _on_darwin else []
extra_link_args = ['-stdlib=libc++'] if _on_darwin else []

tensorbug = cpp_extension.CppExtension(
    name='tensorbug',
    sources=['bug.cc'],
    language='c++',
    extra_compile_args=['-std=c++17'] + extra_compile_args,
    extra_link_args=extra_link_args,
)

setuptools.setup(
    name='tensorbug',
    ext_modules=[tensorbug],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#9
0
          license=LICENSE,
          keywords='tensorflow machine learning rnn lstm gru custom op',
          packages=['haste_tf'],
          package_dir={'haste_tf': 'tf'},
          package_data={'haste_tf': ['*.so']},
          install_requires=[],
          zip_safe=False,
          distclass=BinaryDistribution,
          classifiers=CLASSIFIERS)
elif sys.argv[1] == 'haste_pytorch':
    del sys.argv[1]
    from glob import glob
    from torch.utils import cpp_extension
    extension = cpp_extension.CppExtension(
        'haste_pytorch_lib',
        sources=glob('pytorch/*.cc'),
        include_dirs=['lib', '/usr/local/cuda/include'],
        libraries=['haste'],
        library_dirs=['.'])
    setup(name='haste_pytorch',
          version=VERSION,
          description=DESCRIPTION,
          author=AUTHOR,
          author_email=AUTHOR_EMAIL,
          url=URL,
          license=LICENSE,
          keywords='pytorch machine learning rnn lstm gru custom op',
          packages=['haste_pytorch'],
          package_dir={'haste_pytorch': 'pytorch'},
          install_requires=[],
          ext_modules=[extension],
          cmdclass={'build_ext': cpp_extension.BuildExtension},
示例#10
0
    ),
]


cmdclass = {}


try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension

    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libnat",
                sources=[
                    "fairseq/clib/libnat/edit_dist.cpp",
                ],
            )
        ]
    )

    if "CUDA_HOME" in os.environ:
        extensions.extend(
            [
                cpp_extension.CppExtension(
                    "fairseq.libnat_cuda",
                    sources=[
                        "fairseq/clib/libnat_cuda/edit_dist.cu",
                        "fairseq/clib/libnat_cuda/binding.cpp",
                    ],
                )
示例#11
0
文件: setup.py 项目: gessfred/gRPC
from setuptools import setup, Extension
from torch.utils import cpp_extension

# OpenMP-enabled C++ op.
q_par_ext = cpp_extension.CppExtension('q_par_cpp', ['q_par.cpp'],
                                       extra_compile_args=["-fopenmp"])

setup(
    name='q_par_cpp',
    ext_modules=[q_par_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#12
0
文件: setup.py 项目: zxlzr/MC-BERT
    ),
    NumpyExtension(
        'fairseq.data.data_utils_fast',
        sources=['fairseq/data/data_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        'fairseq.data.token_block_utils_fast',
        sources=['fairseq/data/token_block_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
    cpp_extension.CppExtension(
        'fairseq.libnat',
        sources=[
            'fairseq/clib/libnat/edit_dist.cpp',
        ],
    )
]

setup(
    name='fairseq',
    version='0.8.0',
    description='Facebook AI Research Sequence-to-Sequence Toolkit',
    url='https://github.com/pytorch/fairseq',
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
示例#13
0
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools

from torch.utils import cpp_extension

# Show where PyTorch's C++ headers live (handy when debugging builds).
print(cpp_extension.include_paths())

__version__ = '0.0.1'

qp_fast_ext = cpp_extension.CppExtension(
    'src.qp_fast',
    ["src/qp_fast.cpp"],
    language='c++',
    extra_compile_args=['-std=c++17'],
)
extensions = [qp_fast_ext]

setup(
    name='latent_decision_tree',
    version=__version__,
    author="VZ,MK,VN",
    ext_modules=extensions,
    setup_requires=['pybind11>=2.5.0'],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
    zip_safe=False,
)
示例#14
0
    plugin_compile_args.extend(["-g", "-O0"])

plugin_sources = ["src/torch_ucc.cpp", "src/torch_ucc_comm.cpp"]
plugin_include_dirs = [
    "{}/include/".format(ucc_plugin_dir), "{}/include/".format(ucx_home),
    "{}/include/".format(ucc_home)
]
plugin_library_dirs = ["{}/lib/".format(ucx_home), "{}/lib/".format(ucc_home)]
plugin_libraries = ["ucp", "uct", "ucm", "ucs", "ucc"]

with_cuda = os.environ.get("WITH_CUDA")
if with_cuda is None or with_cuda == "no":
    print("CUDA support is disabled")
    module = cpp_extension.CppExtension(name="torch_ucc",
                                        sources=plugin_sources,
                                        include_dirs=plugin_include_dirs,
                                        library_dirs=plugin_library_dirs,
                                        libraries=plugin_libraries,
                                        extra_compile_args=plugin_compile_args)
else:
    print("CUDA support is enabled")
    plugin_compile_args.append("-DUSE_CUDA")
    module = cpp_extension.CUDAExtension(
        name="torch_ucc",
        sources=plugin_sources,
        include_dirs=plugin_include_dirs,
        library_dirs=plugin_library_dirs,
        libraries=plugin_libraries,
        extra_compile_args=plugin_compile_args)
setup(name="torch-ucc",
      version="1.0.0",
      ext_modules=[module],
示例#15
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Voxelizer C++ op; no special compile flags needed.
voxelizer_ext = cpp_extension.CppExtension('voxelizer_cpp', ['voxelizer.cpp'])

setup(
    name='voxelizer_cpp',
    ext_modules=[voxelizer_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#16
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# C++ sources of the retsp A*-sampling implementation.
_RETSP_SOURCES = [
    'dirpg.cpp',
    'retsp/batched_graphs.cpp',
    'retsp/batched_graphs_tsp.cpp',
    'retsp/a_star_sampling.cpp',
    'retsp/batched_heaps.cpp',
    'retsp/batched_trajectories.cpp',
    'retsp/node_allocator.cpp',
    'retsp/mst_node.cpp',
    'retsp/info_node.cpp',
    'retsp/gumbel_state.cpp',
    'retsp/union_find.cpp',
]

# Matching headers, shipped alongside the extension.
_RETSP_HEADERS = [
    'retsp/batched_graphs.h',
    'retsp/batched_graphs_tsp.h',
    'retsp/a_star_sampling.h',
    'retsp/batched_heaps.h',
    'retsp/batched_trajectories.h',
    'retsp/node_allocator.h',
    'retsp/mst_node.h',
    'retsp/info_node.h',
    'retsp/gumbel_state.h',
    'retsp/union_find.h',
]

setup(
    name='dirpg_cpp',
    ext_modules=[
        cpp_extension.CppExtension('dirpg_cpp',
                                   sources=_RETSP_SOURCES,
                                   include_dirs=['retsp'])
    ],
    headers=_RETSP_HEADERS,
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#17
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Kruskal's algorithm as a PyTorch C++ extension.
kruskals_ext = cpp_extension.CppExtension('kruskals_cpp', ['kruskals.cpp'])

setup(
    name='kruskals_cpp',
    ext_modules=[kruskals_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#18
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# OpenMP-parallel shift kernel, built at -O3.
shift_ext = cpp_extension.CppExtension(
    'shift_kernel',
    ['shift_kernel.cpp'],
    extra_compile_args=['-fopenmp', '-O3'],
)

setup(
    name='shift_kernel',
    ext_modules=[shift_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#19
0
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------

import os

from setuptools import setup
from torch.utils import cpp_extension

# The single C++ source lives next to this setup script.
filename = os.path.join(os.path.dirname(__file__), "aten_op_executor.cc")

aten_ext = cpp_extension.CppExtension(name="aten_op_executor",
                                      sources=[filename])

setup(
    name="aten_op_executor",
    ext_modules=[aten_ext],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
)
示例#20
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# Build the sparse COO tensor C++ op and link it against cuSPARSE.
setup(name='sparse_coo_tensor_cpp',
      ext_modules=[
          cpp_extension.CppExtension('sparse_coo_tensor_cpp',
                                     ['sparse_coo_tensor.cpp'],
                                     # BUG FIX: '-lcusparse' is a *linker* flag;
                                     # passing it via extra_compile_args sends it
                                     # to the compile stage, where it never links
                                     # the library. Request it properly instead.
                                     libraries=['cusparse'])
      ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
示例#21
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# CPU implementation of the LLTM op.
setup(
    name='lltm_cpp',
    ext_modules=[
        cpp_extension.CppExtension('lltm_cpp', ['./models/ops/lltm.cpp'])
    ],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)

# CUDA implementation: host-side wrapper plus the device kernel.
setup(
    name='lltm_cuda',
    ext_modules=[
        cpp_extension.CUDAExtension('lltm_cuda', [
            './models/ops/lltm_cuda.cpp',
            './models/ops/lltm_cuda_kernel.cu',
        ])
    ],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
        'fairseq.data.token_block_utils_fast',
        sources=['fairseq/data/token_block_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
]

cmdclass = {}

try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension
    extensions.extend([
        cpp_extension.CppExtension(
            'fairseq.libnat',
            sources=[
                'fairseq/clib/libnat/edit_dist.cpp',
            ],
        )
    ])

    if 'CUDA_HOME' in os.environ:
        extensions.extend([
            cpp_extension.CppExtension(
                'fairseq.libnat_cuda',
                sources=[
                    'fairseq/clib/libnat_cuda/edit_dist.cu',
                    'fairseq/clib/libnat_cuda/binding.cpp'
                ],
            )
        ])
    cmdclass['build_ext'] = cpp_extension.BuildExtension
示例#23
0
from setuptools import setup, Extension
from torch.utils import cpp_extension
import os
# LIBXSMM checkout is assumed to be four directories above the CWD.
LIBXSMM_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))))
print("LIBXSMM root directory path: ", LIBXSMM_ROOT)

setup(name='conv1dopti-layer',
      # BUG FIX: author/author_email/description are package metadata and
      # belong to setup(); previously they were passed to CppExtension,
      # where setuptools warns about unknown Extension options and drops them.
      author="Narendra Chaudhary",
      author_email="*****@*****.**",
      description="PyTorch Extension for optimized 1D dilated convolutional layer",
      ext_modules=[cpp_extension.CppExtension(
          'Conv1dOpti_cpp', ['Conv1dOpti.cpp'],
          extra_compile_args=['-O3', '-g',
                              '-fopenmp-simd', '-fopenmp', '-march=native',
                              # AVX-512 flags, kept for reference:
                              # '-mprefer-vector-width=512', '-mavx512f', '-mavx512cd', '-mavx512bw',
                              # '-mavx512dq', '-mavx512vl', '-mavx512ifma', '-mavx512vbmi'
                              ],
          include_dirs=['{}/include/'.format(LIBXSMM_ROOT)],
          library_dirs=['{}/lib/'.format(LIBXSMM_ROOT)],
          libraries=['xsmm'],
      )],
      py_modules=['Conv1dOpti_ext'],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
示例#24
0
#encoding: utf-8

from setuptools import setup, Extension
from torch.utils import cpp_extension

# Residual-attention C++ op.
res_attn_ext = cpp_extension.CppExtension(
    'res_attn_cpp', ['modules/cpp/base/resattn/attn.cpp'])

setup(
    name='res_attn_cpp',
    ext_modules=[res_attn_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#25
0
#encoding: utf-8

from setuptools import setup, Extension
from torch.utils import cpp_extension

# C++ "lgate" op used by the hplstm modules.
lgate_ext = cpp_extension.CppExtension('lgate_cpp',
                                       ['modules/cpp/hplstm/lgate.cpp'])

setup(
    name='lgate_cpp',
    ext_modules=[lgate_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#26
0
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------

import os
from setuptools import setup, Extension
from torch.utils import cpp_extension

# The C++ source lives next to this setup script.
filename = os.path.join(os.path.dirname(__file__), 'torch_interop_utils.cc')

interop_ext = cpp_extension.CppExtension(name='torch_interop_utils',
                                         sources=[filename])

setup(
    name='torch_interop_utils',
    ext_modules=[interop_ext],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
示例#27
0
文件: setup.py 项目: scheiblr/fairseq
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]

cmdclass = {}

try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension

    extensions.extend([
        cpp_extension.CppExtension(
            "fairseq.libbase",
            sources=[
                "fairseq/clib/libbase/balanced_assignment.cpp",
            ],
        )
    ])

    extensions.extend([
        cpp_extension.CppExtension(
            "fairseq.libnat",
            sources=[
                "fairseq/clib/libnat/edit_dist.cpp",
            ],
        ),
        cpp_extension.CppExtension(
            "alignment_train_cpu_binding",
            sources=[
                "examples/operators/alignment_train_cpu.cpp",
示例#28
0
文件: setup.py 项目: selflein/fasth
from setuptools import setup
from torch.utils import cpp_extension

# BUG FIX: the original used bare open('README.md').read(), leaking the file
# handle; read it through a context manager instead.
with open('README.md') as _readme:
    _long_description = _readme.read()

setup(
  name='fasth',
  version='0.0.1',
  license='LICENSE',
  description='',
  packages=["fasth"],
  long_description=_long_description,
  long_description_content_type="text/markdown",
  # NOTE(review): a .cu source is listed under CppExtension — presumably
  # BuildExtension still dispatches it to nvcc; confirm CUDAExtension
  # (which also adds cudart and CUDA include paths) isn't needed here.
  ext_modules=[cpp_extension.CppExtension('fasth', ['fasth/fasth_cuda.cu', 'fasth/fasth.cpp'])],
  cmdclass={'build_ext': cpp_extension.BuildExtension},
  python_requires=">=3.6",
  install_requires=[
      "torch>=1.3.1",
      "numpy",
      "ninja",
  ],
)
示例#29
0
from setuptools import setup, Extension
from torch.utils import cpp_extension

# cctc2 C++ op, compiled with debug symbols.
cctc2_ext = cpp_extension.CppExtension(
    "cctc2",
    ["cctc2.cpp"],
    extra_compile_args=["-g"],
)

setup(
    name="cctc2",
    ext_modules=[cctc2_ext],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
)
示例#30
0
sourceFiles = ['hingetree.cpp']
extraCflags = ['-O2']
extraCudaFlags = ['-O2']

# When CUDA is available, add the GPU kernel and enable the WITH_CUDA code
# path in the C++ sources.
_have_cuda = torch.cuda.is_available()
if _have_cuda:
    sourceFiles.append('hingetree_gpu.cu')
    extraCflags.append('-DWITH_CUDA=1')

# Same setup either way; only the extension class differs.
_ext_cls = cpp_extension.CUDAExtension if _have_cuda else cpp_extension.CppExtension

setup(
    name='hingetree_cpp',
    ext_modules=[
        _ext_cls(name='hingetree_cpp',
                 sources=sourceFiles,
                 extra_compile_args={
                     'cxx': extraCflags,
                     'nvcc': extraCudaFlags,
                 })
    ],
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)