Example #1
    def compile(self, obj: str, src: str, ext: Extension) -> None:
        if self._context.use_hip:
            raise RuntimeError('ROCm is not supported on Windows')

        compiler_so = build.get_nvcc_path()
        cc_args = self._get_preprocess_options(ext) + ['-c']
        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
        if cuda_version >= 11020:
            # MSVC 14.0 (2015) is deprecated for CUDA 11.2 but we need it
            # to build CuPy because some Python versions were built using it.
            # REF: https://wiki.python.org/moin/WindowsCompilers
            postargs += ['-allow-unsupported-compiler']
        postargs += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
        # This is to compile thrust with MSVC2015
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        cl_exe_path = self._find_host_compiler_path()
        if cl_exe_path is None:
            print('Warning: Host compiler path could not be detected')
        else:
            postargs += ['--compiler-bindir', cl_exe_path]
        print('NVCC options:', postargs)
        self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
def cythonize(extensions, ctx: Context):
    # Delay importing Cython as it may be installed via setup_requires if
    # the user does not have Cython installed.
    import Cython
    import Cython.Build
    cython_version = pkg_resources.parse_version(Cython.__version__)

    directives = {
        'linetrace': ctx.linetrace,
        'profile': ctx.profile,
        # Embed signatures for Sphinx documentation.
        'embedsignature': True,
    }
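    # Note: 'linetrace' and 'profile' above are standard Cython compiler
    # directives; enabling them inserts line-tracing/profiling hooks into the
    # generated C code (used for coverage measurement and profiling).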

    cythonize_options = {'annotate': ctx.annotate}

    # Compile-time constants to be used in Cython code
    compile_time_env = cythonize_options.get('compile_time_env')
    if compile_time_env is None:
        compile_time_env = {}
        cythonize_options['compile_time_env'] = compile_time_env

    # Enable CUDA Python.
    # TODO: add `cuda` to `setup_requires` only when this flag is set
    use_cuda_python = cupy_builder.get_context().use_cuda_python
    compile_time_env['CUPY_USE_CUDA_PYTHON'] = use_cuda_python
    if use_cuda_python:
        print('Using CUDA Python')

    compile_time_env['CUPY_CUFFT_STATIC'] = False
    compile_time_env['CUPY_CYTHON_VERSION'] = str(cython_version)
    if ctx.use_stub:  # on RTD
        compile_time_env['CUPY_CUDA_VERSION'] = 0
        compile_time_env['CUPY_HIP_VERSION'] = 0
    elif use_hip:  # on ROCm/HIP
        compile_time_env['CUPY_CUDA_VERSION'] = 0
        compile_time_env['CUPY_HIP_VERSION'] = build.get_hip_version()
    else:  # on CUDA
        compile_time_env['CUPY_CUDA_VERSION'] = build.get_cuda_version()
        compile_time_env['CUPY_HIP_VERSION'] = 0
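    # These compile-time names are consumed in the .pyx sources through
    # Cython's conditional compilation, e.g. (illustrative only):
    #
    #     IF CUPY_HIP_VERSION > 0:
    #         ...  # ROCm/HIP-specific code path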

    return Cython.Build.cythonize(extensions,
                                  verbose=True,
                                  language_level=3,
                                  compiler_directives=directives,
                                  **cythonize_options)
    def _compile_cu(self,
                    sources,
                    output_dir=None,
                    macros=None,
                    include_dirs=None,
                    debug=0,
                    extra_preargs=None,
                    extra_postargs=None,
                    depends=None):
        # Compile CUDA C files, mainly derived from UnixCCompiler._compile().
        macros, objects, extra_postargs, pp_opts, _build = \
            self._setup_compile(output_dir, macros, include_dirs, sources,
                                depends, extra_postargs)

        compiler_so = build.get_nvcc_path()
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
        if cuda_version >= 11020:
            # MSVC 14.0 (2015) is deprecated for CUDA 11.2 but we need it
            # to build CuPy because some Python versions were built using it.
            # REF: https://wiki.python.org/moin/WindowsCompilers
            postargs += ['-allow-unsupported-compiler']
        postargs += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
        # This is to compile thrust with MSVC2015
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        print('NVCC options:', postargs)

        for obj in objects:
            try:
                src, ext = _build[obj]
            except KeyError:
                continue
            try:
                self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
            except errors.DistutilsExecError as e:
                raise errors.CompileError(str(e))

        return objects
Example #4
    def _compile_unix_nvcc(self, obj: str, src: str, ext: Extension) -> None:
        cc_args = self._get_preprocess_options(ext) + ['-c']

        # For CUDA C source files, compile them with NVCC.
        nvcc_path = build.get_nvcc_path()
        base_opts = build.get_compiler_base_options(nvcc_path)
        compiler_so = nvcc_path

        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + [
            '-O2', '--compiler-options="-fPIC"'
        ]
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        else:
            postargs += ['--std=c++11']
        postargs += ['-Xcompiler=-fno-gnu-unique']
        print('NVCC options:', postargs)
        self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
                   postargs)
Example #5
    def _compile_unix_nvcc(self,
                           obj, src, ext, cc_args, extra_postargs, pp_opts):
        # For CUDA C source files, compile them with NVCC.
        nvcc_path = build.get_nvcc_path()
        base_opts = build.get_compiler_base_options(nvcc_path)
        compiler_so = nvcc_path

        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + [
            '-O2', '--compiler-options="-fPIC"']
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        else:
            postargs += ['--std=c++11']
        print('NVCC options:', postargs)
        try:
            self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
                       postargs)
        except errors.DistutilsExecError as e:
            raise errors.CompileError(str(e))
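
The helper _nvcc_gencode_options used throughout the examples above is not shown in this listing. A minimal sketch of a helper with a compatible shape (illustrative only; the target architectures below are assumptions, not the actual selection logic):

def _nvcc_gencode_options(cuda_version):
    # Return NVCC code-generation flags; the real helper would vary the
    # architecture list based on cuda_version and user options.
    return ['--generate-code=arch=compute_70,code=sm_70',
            '--generate-code=arch=compute_70,code=compute_70']
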
Example #6
    def test_check_cuda_version(self):
        with self.assertRaises(RuntimeError):
            build.get_cuda_version()
        assert build.check_cuda_version(self.compiler, self.settings)
        assert isinstance(build.get_cuda_version(), int)
        assert isinstance(build.get_cuda_version(True), str)
def preconfigure_modules(MODULES, compiler, settings):
    """Returns a list of modules buildable in given environment and settings.

    For each module in MODULES list, this function checks if the module
    can be built in the current environment and reports it.
    Returns a list of module names available.
    """

    nvcc_path = build.get_nvcc_path()
    hipcc_path = build.get_hipcc_path()
    summary = [
        '',
        '************************************************************',
        '* CuPy Configuration Summary                               *',
        '************************************************************',
        '',
        'Build Environment:',
        '  Include directories: {}'.format(str(settings['include_dirs'])),
        '  Library directories: {}'.format(str(settings['library_dirs'])),
        '  nvcc command       : {}'.format(
            nvcc_path if nvcc_path else '(not found)'),
        '  hipcc command      : {}'.format(
            hipcc_path if hipcc_path else '(not found)'),
        '',
        'Environment Variables:',
    ]

    for key in [
            'CFLAGS', 'LDFLAGS', 'LIBRARY_PATH', 'CUDA_PATH',
            'NVTOOLSEXT_PATH', 'NVCC', 'HIPCC', 'ROCM_HOME'
    ]:
        summary += ['  {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]

    summary += [
        '',
        'Modules:',
    ]

    ret = []
    for module in MODULES:
        installed = False
        status = 'No'
        errmsg = []

        if module['name'] == 'cutensor':
            cutensor_path = os.environ.get('CUTENSOR_PATH', '')
            inc_path = os.path.join(cutensor_path, 'include')
            if os.path.exists(inc_path):
                settings['include_dirs'].append(inc_path)
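            # Derive CUDA version strings matching the cuTENSOR library
            # directory layout, e.g. cuda_version 11020 -> cuda_major '11',
            # cuda_major_minor '11.2'.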
            cuda_version = build.get_cuda_version()
            cuda_major = str(cuda_version // 1000)
            cuda_major_minor = cuda_major + '.' + \
                str((cuda_version // 10) % 100)
            for cuda_ver in (cuda_major_minor, cuda_major):
                lib_path = os.path.join(cutensor_path, 'lib', cuda_ver)
                if os.path.exists(lib_path):
                    settings['library_dirs'].append(lib_path)
                    break

        # In ROCm 4.1 and later, the standalone hipFFT package must be used in
        # addition to rocFFT. Depending on the ROCm version, adjust the include
        # directories and the libraries to link here, before the configuration
        # process below.
        if use_hip and module['name'] == 'cuda':
            if module['check_method'](compiler, settings):
                hip_version = module['version_method']()
                if hip_version >= 401:
                    rocm_path = build.get_rocm_path()
                    inc_path = os.path.join(rocm_path, 'hipfft', 'include')
                    settings['include_dirs'].insert(0, inc_path)
                    lib_path = os.path.join(rocm_path, 'hipfft', 'lib')
                    settings['library_dirs'].insert(0, lib_path)
                # N.B. this modifies MODULES['cuda']['libraries'] in place.
                canonicalize_hip_libraries(hip_version, module['libraries'])

        print('')
        print('-------- Configuring Module: {} --------'.format(
            module['name']))
        sys.stdout.flush()
        if not check_library(
                compiler,
                includes=module['include'],
                include_dirs=settings['include_dirs'],
                define_macros=settings['define_macros'],
                extra_compile_args=settings['extra_compile_args']):
            errmsg = [
                'Include files not found: %s' % module['include'],
                'Check your CFLAGS environment variable.'
            ]
        elif not check_library(
                compiler,
                libraries=module['libraries'],
                library_dirs=settings['library_dirs'],
                define_macros=settings['define_macros'],
                extra_compile_args=settings['extra_compile_args']):
            errmsg = [
                'Cannot link libraries: %s' % module['libraries'],
                'Check your LDFLAGS environment variable.'
            ]
        elif ('check_method' in module
              and not module['check_method'](compiler, settings)):
            # Fail on per-library condition check (version requirements etc.)
            installed = True
            errmsg = ['The library is installed but not supported.']
        elif (module['name'] in ('thrust', 'cub', 'random')
              and (nvcc_path is None and hipcc_path is None)):
            installed = True
            cmd = 'nvcc' if not use_hip else 'hipcc'
            errmsg = [
                '{} command could not be found in PATH.'.format(cmd),
                'Check your PATH environment variable.'
            ]
        else:
            installed = True
            status = 'Yes'
            ret.append(module['name'])

        if installed and 'version_method' in module:
            status += ' (version {})'.format(module['version_method'](True))

        summary += ['  {:<10}: {}'.format(module['name'], status)]

        # If any error messages were recorded, append them to the summary.
        if len(errmsg) != 0:
            summary += ['    -> {}'.format(m) for m in errmsg]

            # Skip checking other modules when CUDA is unavailable.
            if module['name'] == 'cuda':
                break

    # Detect the compute capabilities (CC) of the devices attached to this node.
    if not use_hip:
        build.check_compute_capabilities(compiler, settings)

    if len(ret) != len(MODULES):
        if 'cuda' in ret:
            lines = [
                'WARNING: Some modules could not be configured.',
                'CuPy will be installed without these modules.',
            ]
        else:
            lines = [
                'ERROR: CUDA could not be found on your system.',
                '',
                'HINT: You are trying to build CuPy from source, '
                'which is NOT recommended for general use.',
                '      Please consider using binary packages instead.',
                '',
            ]
        summary += [
            '',
        ] + lines + [
            'Please refer to the Installation Guide for details:',
            'https://docs.cupy.dev/en/stable/install.html',
            '',
        ]

    summary += [
        '************************************************************',
        '',
    ]

    print('\n'.join(summary))
    return ret, settings
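
A minimal usage sketch (assumed, not taken from the listing above): the module dictionary keys ('name', 'include', 'libraries', and the optional 'check_method' / 'version_method') and the settings keys are inferred from the checks performed inside preconfigure_modules.

from distutils import ccompiler

MODULES = [
    {
        'name': 'cuda',
        'include': ['cuda.h', 'cuda_runtime.h'],
        'libraries': ['cudart'],
        'check_method': build.check_cuda_version,
        'version_method': build.get_cuda_version,
    },
]
settings = {
    'include_dirs': ['/usr/local/cuda/include'],  # assumed paths
    'library_dirs': ['/usr/local/cuda/lib64'],
    'define_macros': [],
    'extra_compile_args': [],
}
available, settings = preconfigure_modules(
    MODULES, ccompiler.new_compiler(), settings)
print(available)  # e.g. ['cuda'] if the CUDA toolchain was detected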