def build_extensions(self):
    """Request C++11, silence noisy warnings, and handle old macOS targets.

    Robustness fix: ``MACOSX_DEPLOYMENT_TARGET`` used to be unpacked into
    exactly two dot-separated fields, which raises ValueError on values
    such as ``'10.9.5'``; only the first two fields are inspected now.
    (The unused ``import platform`` was also dropped.)
    """
    from distutils import sysconfig
    if hasattr(self.compiler, 'compiler') and len(self.compiler.compiler) > 0:
        cc_name = self.compiler.compiler[0]
        stdcpp = '-std=c++11'
        if 'gcc' in cc_name and not _check_gcc_cpp11(cc_name):
            # Pre-4.7 gcc only understands the draft-standard spelling.
            stdcpp = '-std=c++0x'
        # Warnings silenced project-wide for third-party/generated code.
        quiet_flags = (
            '-Wno-deprecated-declarations',
            '-Wno-unused-local-typedefs',
            '-Wno-sign-compare',
            '-Wno-self-assign',
            '-Wno-macro-redefined',
            '-Wno-unused-const-variable',
        )
        for e in self.extensions:
            e.extra_compile_args.append(stdcpp)
            e.extra_compile_args.extend(quiet_flags)
        conf_vars = sysconfig.get_config_vars()
        target = conf_vars.get('MACOSX_DEPLOYMENT_TARGET') or ''
        if target:
            # Fix: tolerate 1- or 3-component targets such as '10.9.5'.
            parts = target.split('.')
            major = int(parts[0])
            minor = int(parts[1]) if len(parts) > 1 else 0
            if major == 10 and minor < 9:
                # Pre-10.9 deployment targets default to libstdc++;
                # request libc++ explicitly for C++11 support.
                stdcpp = '--stdlib=libc++'
                for e in self.extensions:
                    e.extra_compile_args.append(stdcpp)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build the bundled XPA library with make, then build the extensions.

    Bug fix: ``subprocess.call`` ignored the exit status, so a failed
    configure/make silently produced a broken build; ``check_call``
    makes any failure abort the build with CalledProcessError.
    """
    import subprocess
    subprocess.check_call(["make", "-f", "Makefile", "clean"], cwd=XPALIB_DIR)
    subprocess.check_call(["sh", "./configure"], cwd=XPALIB_DIR)
    subprocess.check_call(["make", "-f", "Makefile"], cwd=XPALIB_DIR)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build the C extensions only on non-Windows platforms.

    On win32 the extensions are skipped with a warning instead of
    failing the whole installation.  (The unused local binding of
    ``self.compiler`` was removed as dead code.)
    """
    if sys.platform != 'win32':
        build_ext.build_extensions(self)
    else:
        print("Warning: the C extensions will not be built since the compiler could not be found.\n"
              "See https://github.com/bsmurphy/PyKrige/issues/8 ")
def build_extensions(self):
    """Make numpy's C headers visible to every extension, then build."""
    incl = pkg_resources.resource_filename('numpy', 'core/include')
    for extension in self.extensions:
        if hasattr(extension, 'include_dirs') and incl not in extension.include_dirs:
            extension.include_dirs.append(incl)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Drop '-Wstrict-prototypes' (C-only, invalid for C++) and build."""
    customize_compiler(self.compiler)
    flags = getattr(self.compiler, "compiler_so", None)
    if flags is not None and "-Wstrict-prototypes" in flags:
        flags.remove("-Wstrict-prototypes")
    build_ext.build_extensions(self)
def build_extensions(self):
    """Emit Kivy's config.h / config.pxi headers, then run the build."""
    print('Build configuration is:')
    for opt, value in c_options.items():
        print(' * {0} = {1}'.format(opt, value))

    base = join(dirname(__file__), 'kivy', 'graphics')

    print('Generate config.h')
    lines = ['// Autogenerated file for Kivy C configuration\n']
    lines.extend('#define __{0} {1}\n'.format(k.upper(), int(v))
                 for k, v in c_options.items())
    with open(join(base, 'config.h'), 'w') as fd:
        fd.writelines(lines)

    print('Generate config.pxi')
    lines = ['# Autogenerated file for Kivy Cython configuration\n']
    lines.extend('DEF {0} = {1}\n'.format(k.upper(), int(v))
                 for k, v in c_options.items())
    lines.append('DEF PY3 = {0}\n'.format(int(PY3)))
    with open(join(base, 'config.pxi'), 'w') as fd:
        fd.writelines(lines)

    compiler_kind = self.compiler.compiler_type
    print('Detected compiler is {}'.format(compiler_kind))
    if compiler_kind != 'msvc':
        # non-MSVC toolchains need libm linked explicitly
        for ext in self.extensions:
            ext.extra_link_args += ['-lm']
    build_ext.build_extensions(self)
def build_extensions(self):
    """Regenerate config.h / config.pxi (only when changed) and build."""
    print('Build configuration is:')
    for opt, value in c_options.items():
        print(' * {0} = {1}'.format(opt, value))

    print('Generate config.h')
    h_lines = ['// Autogenerated file for Kivy C configuration\n',
               '#define __PY3 {0}\n'.format(int(PY3))]
    h_lines += ['#define __{0} {1}\n'.format(k.upper(), int(v))
                for k, v in c_options.items()]
    self.update_if_changed(expand('graphics', 'config.h'), ''.join(h_lines))

    print('Generate config.pxi')
    # rewrite the pxi only when its content actually changed
    pxi_lines = ['# Autogenerated file for Kivy Cython configuration\n',
                 'DEF PY3 = {0}\n'.format(int(PY3))]
    pxi_lines += ['DEF {0} = {1}\n'.format(k.upper(), int(v))
                  for k, v in c_options.items()]
    self.update_if_changed(expand('graphics', 'config.pxi'), ''.join(pxi_lines))

    compiler_kind = self.compiler.compiler_type
    print('Detected compiler is {}'.format(compiler_kind))
    if compiler_kind != 'msvc':
        # non-MSVC toolchains need libm linked explicitly
        for ext in self.extensions:
            ext.extra_link_args += ['-lm']
    build_ext.build_extensions(self)
def build_extensions(self):
    """When compiling with clang, force libc++ (plus a macOS minimum)."""
    cc = self.compiler.compiler[0]
    if is_clang(cc):
        on_darwin = platform.system() == 'Darwin'
        for ext in self.extensions:
            ext.extra_compile_args.append('-stdlib=libc++')
            if on_darwin:
                ext.extra_compile_args.append('-mmacosx-version-min=10.7')
    build_ext.build_extensions(self)
def build_extensions(self, *args, **kwargs):
    """Merge per-compiler global flags into every extension, then build.

    Bug fix: the previous ``list(set(...))`` de-duplication scrambled
    the flag order non-deterministically; compiler/linker flags are
    order sensitive (successive -O levels, grouped -Wl options, ...),
    so duplicates are now removed while preserving first-seen order.
    """
    compiler_type = self.compiler.compiler_type
    if compiler_type not in extra_args:
        compiler_type = 'unix'  # probably some unix-like compiler
    global_compile = extra_args[compiler_type]['extra_compile_args']
    global_link = extra_args[compiler_type]['extra_link_args']
    # merge compile and link arguments with the global arguments for the
    # current compiler; dict.fromkeys keeps insertion order (Python 3.7+)
    for e in self.extensions:
        e.extra_compile_args = list(
            dict.fromkeys(e.extra_compile_args + global_compile))
        e.extra_link_args = list(
            dict.fromkeys(e.extra_link_args + global_link))
    _build_ext.build_extensions(self, *args, **kwargs)
def build_extensions(self):
    """Attempt the Cython build; translate known failures into BuildFailed."""
    try:
        build_ext.build_extensions(self)
    except ext_errors:
        banner = "**************************************************"
        print(banner)
        print("WARNING: Cython extensions failed to build. Falling back to pure Python implementation.\n"
              "See https://github.com/bsmurphy/PyKrige/issues/8 for more information.")
        print(banner)
        raise BuildFailed()
def build_extensions(self):
    """Attach OpenMP compile/link flags (when enabled), then build.

    Bug fix: extensions previously aliased the list objects stored in
    the shared BUILD_ARGS / LINK_ARGS tables; an in-place mutation of
    one extension's flags would leak into the tables and every other
    extension.  Each extension now receives its own copy.
    """
    if use_openmp:
        compiler = self.compiler.compiler_type
        for ext in self.extensions:
            if compiler in BUILD_ARGS:
                ext.extra_compile_args = list(BUILD_ARGS[compiler])
            if compiler in LINK_ARGS:
                ext.extra_link_args = list(LINK_ARGS[compiler])
    build_ext.build_extensions(self)
def build_extensions(self):
    """Request C++11 (falling back to c++0x for old gcc), then build."""
    compiler_cmd = getattr(self.compiler, 'compiler', None)
    if compiler_cmd:
        cc_name = compiler_cmd[0]
        if 'gcc' in cc_name and not _check_gcc_cpp11(cc_name):
            flag = '-std=c++0x'  # pre-4.7 gcc spelling of C++11
        else:
            flag = '-std=c++11'
        for ext in self.extensions:
            ext.extra_compile_args.append(flag)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Scrub flags that break this build from the compiler, then delegate."""
    customize_compiler(self.compiler)
    # '-Wstrict-prototypes' is a C-only warning flag.
    try:
        self.compiler.compiler_so.remove("-Wstrict-prototypes")
    except (AttributeError, ValueError):
        pass
    # Strip every LTO-related flag inherited from the interpreter build.
    unwanted = ("-flto", "-flto-partition=none", "-fuse-linker-plugin",
                "-ffat-lto-objects")
    self.compiler.compiler_so = [flag for flag in self.compiler.compiler_so
                                 if flag not in unwanted]
    build_ext.build_extensions(self)
def build_extensions(self):
    # Python 2 code (print statement, dict.iteritems).
    # Generate the Cython compile-time configuration next to this file,
    # run the normal build, then clean up the generated artifacts.
    print 'Generate config.pxi'
    filename = os.path.join(os.path.dirname(__file__), 'config.pxi')
    with open(filename, 'w') as fd:
        for k, v in c_options.iteritems():
            # each build option becomes a Cython compile-time DEF constant
            fd.write('DEF %s = %d\n' % (k.upper(), int(v)))
    build_ext.build_extensions(self)
    # The pxi is only needed while cythonizing; remove it afterwards,
    # together with the generated C++ translation unit.
    os.remove(filename)
    cppfilename = os.path.join(os.path.dirname(__file__), 'cyni.cpp')
    os.remove(cppfilename)
def build_extensions(self):
    """Translate generic flags/libraries into the active compiler's dialect."""
    ctype = self.compiler.compiler_type
    table = translator[ctype] if ctype in translator else translator["default"]
    for ext in self.extensions:
        # entry[0] = compile-time spelling, entry[1] = link-time spelling
        ext.extra_compile_args = [table[flag][0] if flag in table else flag
                                  for flag in ext.extra_compile_args]
        ext.extra_link_args = [table[flag][1] if flag in table else flag
                               for flag in ext.extra_link_args]
        # libraries with no translation entry are dropped entirely
        ext.libraries = [table[lib] for lib in ext.libraries if lib in table]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Configure MSVC ('VC++ for Python' 9.0) specifics, then build.

    Bug fix: the INCLUDE environment variable used to be overwritten
    outright, discarding any include paths already configured by the
    user or the toolchain; the compiler-specific path is now appended.
    """
    c = self.compiler.compiler_type
    if c == "msvc":
        for e in self.extensions:
            # /EHsc: standard C++ exception-handling model.
            e.extra_compile_args = ["/EHsc"]
        userdir = os.environ["USERPROFILE"]
        vc_include = os.path.join(
            userdir, "AppData", "Local", "Programs", "Common", "Microsoft",
            "Visual C++ for Python", "9.0", "VC", "include")
        existing = os.environ.get("INCLUDE")
        os.environ["INCLUDE"] = (
            existing + os.pathsep + vc_include if existing else vc_include)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Drop the placeholder extension, expose numpy's headers, and build."""
    incl = pkg_resources.resource_filename('numpy', 'core/include')
    self.extensions = [e for e in self.extensions if e.name != '__dummy__']
    for e in self.extensions:
        if hasattr(e, 'include_dirs') and incl not in e.include_dirs:
            e.include_dirs.append(incl)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Probe whether the compiler accepts -Ofast, falling back to -O3.

    Bug fixes:
    * The probe used the bash-only redirection ``&>/dev/null``; with
      ``shell=True`` subprocess runs ``/bin/sh``, where ``cmd &> file``
      parses as "run cmd in the background, then truncate file" — so the
      probe always exited 0 and the -O3 fallback could never trigger.
      Output is now discarded through subprocess itself.
    * The bare ``except:`` is narrowed to ``Exception`` so that e.g.
      KeyboardInterrupt is not swallowed.
    """
    extra_compile_args = ['-w', '-Ofast']
    cmd = "echo | %s -E - %s" % (
        self.compiler.compiler[0], " ".join(extra_compile_args))
    try:
        subprocess.check_call(cmd, shell=True,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
    except Exception:
        extra_compile_args = ['-w', '-O3']
    for e in self.extensions:
        e.extra_compile_args = extra_compile_args
    build_ext.build_extensions(self)
def build_extensions(self):
    # Generate the C / Cython / Python configuration files, mirror them
    # into the build and source trees, then run the normal build.

    # build files: destination tuples relative to the package roots
    config_h_fn = ('graphics', 'config.h')
    config_pxi_fn = ('graphics', 'config.pxi')
    config_py_fn = ('setupconfig.py', )

    # generate headers
    config_h = '// Autogenerated file for Kivy C configuration\n'
    config_h += '#define __PY3 {0}\n'.format(int(PY3))
    config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
    config_pxi += 'DEF PY3 = {0}\n'.format(int(PY3))
    config_py = '# Autogenerated file for Kivy configuration\n'
    config_py += 'PY3 = {0}\n'.format(int(PY3))
    config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
        repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
    config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
        str, CYTHON_UNSUPPORTED))))

    # generate content: every c_option is normalised to 0/1 and mirrored
    # into all three generated files
    print('Build configuration is:')
    for opt, value in c_options.items():
        value = int(bool(value))
        print(' * {0} = {1}'.format(opt, value))
        opt = opt.upper()
        config_h += '#define __{0} {1}\n'.format(opt, value)
        config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
        config_py += '{0} = {1}\n'.format(opt, value)
    debug = bool(self.debug)
    print(' * debug = {0}'.format(debug))

    config_h += \
        '#if __USE_GLEW && defined(_WIN32)\n# define GLEW_BUILD\n#endif'
    config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
    config_py += 'DEBUG = {0}\n'.format(debug)

    # write each file into the build tree (and mirror it into the source
    # tree when the two differ); update_if_changed avoids touching files
    # whose content is unchanged, preventing needless rebuilds
    for fn, content in (
            (config_h_fn, config_h),
            (config_pxi_fn, config_pxi),
            (config_py_fn, config_py)):
        build_fn = expand(build_path, *fn)
        if self.update_if_changed(build_fn, content):
            print('Updated {}'.format(build_fn))
        src_fn = expand(src_path, *fn)
        if src_fn != build_fn and self.update_if_changed(src_fn, content):
            print('Updated {}'.format(src_fn))

    c = self.compiler.compiler_type
    print('Detected compiler is {}'.format(c))
    if c != 'msvc':
        # non-MSVC toolchains need libm linked explicitly
        for e in self.extensions:
            e.extra_link_args += ['-lm']
            if PY3:
                # NOTE(review): the Python library version is hard-coded
                # ('-lpython3.5m'); presumably this only targets a
                # CPython 3.5 build -- confirm before reuse.
                e.extra_link_args += ['-lpython3.5m']
    build_ext.build_extensions(self)
def build_extensions(self):
    """Patch the Cython modules, then build (optionally in parallel).

    Bug fix: the worker pool was created as a bare ``Pool().map(...)``
    and never closed, leaking worker processes on every build; it is
    now used as a context manager so the workers are reliably reaped.
    """
    # First patch the files, then run the build.
    patchCythonModules(self.build_lib)
    if cythonParallelBuild:
        # Parallel build: fan each extension out to a worker process.
        self.check_extensions_list(self.extensions)
        from multiprocessing.pool import Pool
        with Pool() as pool:
            pool.map(cyBuildWrapper,
                     ((self, ext) for ext in self.extensions))
    else:
        # Normal, sequential build.
        Cython_build_ext.build_extensions(self)
def build_extensions(self):
    """Make Python re-import Cython from the 2to3 build directory."""
    # Drop every already-imported Cython module so the transformed
    # sources under build_lib win on the next import.
    stale = [name for name in sys.modules
             if name == 'Cython' or name.startswith('Cython.')]
    for name in stale:
        del sys.modules[name]
    sys.path.insert(0, os.path.join(source_root, self.build_lib))
    if profile:
        from Cython.Compiler.Options import directive_defaults
        directive_defaults['profile'] = True
        print("Enabled profiling for the Cython binary modules")
    build_ext_orig.build_extensions(self)
def build_extensions(self):
    """Map abstract build flags onto the active compiler's spelling."""
    ctype = self.compiler.compiler_type
    trans = translator[ctype] if ctype in translator else translator['default']

    def cc(flag):
        # column 0 of a translation entry: compile-time spelling
        return trans[flag][0] if flag in trans else flag

    def ld(flag):
        # column 1 of a translation entry: link-time spelling
        return trans[flag][1] if flag in trans else flag

    for ext in self.extensions:
        ext.extra_compile_args = [cc(f) for f in ext.extra_compile_args]
        ext.extra_link_args = [ld(f) for f in ext.extra_link_args]
        # keep only libraries with a (truthy) translation entry
        ext.libraries = [trans[lib] for lib in ext.libraries
                         if lib in trans and trans[lib]]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Try the Cython build; on known compile errors, warn and continue.

    Dead-code fix: the trailing bare ``except: raise`` clause of the
    original was removed — re-raising unchanged is exactly what happens
    with no handler, so unexpected exceptions still propagate.
    """
    try:
        build_ext.build_extensions(self)
    except ext_errors:
        print("**************************************************")
        print("WARNING: Cython extensions failed to build (used in abel.direct).\n"
              "Typical reasons for this problem are:\n"
              " - a C compiler is not installed or not found\n"
              " - issues using mingw compiler on Windows 64bit (experimental support for now)\n"
              "This only means that the abel.direct C implementation will not be available.\n")
        print("**************************************************")
        # continue the install
def build_extensions(self):
    """Attach per-compiler options to every extension, then build.

    Bug fixes:
    * ``self.c_opts.get(ct, [])`` returned the list object stored in the
      shared ``c_opts`` table, which the code then mutated with
      ``append`` — polluting the table for any later use.  We now work
      on a copy.
    * Each extension used to alias the same ``opts`` list; every
      extension now gets its own copy.
    """
    ct = self.compiler.compiler_type
    opts = list(self.c_opts.get(ct, []))
    if ct == 'unix':
        opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
        opts.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, '-fopenmp'):
            opts.append('-fopenmp')
        if has_flag(self.compiler, '-fvisibility=hidden'):
            opts.append('-fvisibility=hidden')
    elif ct == 'msvc':
        opts.append('/DVERSION_INFO=\\"%s\\"'
                    % self.distribution.get_version())
    for ext in self.extensions:
        ext.extra_compile_args = list(opts)
    build_ext.build_extensions(self)
def build_extensions(self):
    # Python 2 code (print statements, dict.iteritems).
    # Write the C and Cython configuration headers, then build normally.
    print "Generate config.h"
    config_h = join(dirname(__file__), "kivy", "graphics", "config.h")
    with open(config_h, "w") as fd:
        fd.write("// Autogenerated file for Kivy C configuration\n")
        for k, v in c_options.iteritems():
            # one C preprocessor define per build option
            fd.write("#define __%s %d\n" % (k.upper(), int(v)))
    print "Generate config.pxi"
    config_pxi = join(dirname(__file__), "kivy", "graphics", "config.pxi")
    with open(config_pxi, "w") as fd:
        fd.write("# Autogenerated file for Kivy Cython configuration\n")
        for k, v in c_options.iteritems():
            # one Cython compile-time DEF per build option
            fd.write("DEF %s = %d\n" % (k.upper(), int(v)))
    build_ext.build_extensions(self)
def build_extensions(self):
    """
    Lazily append numpy's include directory to Extension includes.

    Deferred to build time (rather than module scope) because setup.py
    may run before numpy has been installed, when importing numpy and
    calling `numpy.get_include()` would fail.
    """
    numpy_headers = resource_filename('numpy', 'core/include')
    for extension in self.extensions:
        extension.include_dirs.append(numpy_headers)
    # Explicit superclass call rather than super(): distutils'
    # build_ext, which Cython's build_ext subclasses, is an old-style
    # class in Python 2 and does not support `super`.
    cython_build_ext.build_extensions(self)
def build_extensions(self):
    # Python 2 code (print statements, dict.iteritems).
    # Generate the config headers consumed by the C/Cython sources,
    # then hand off to the stock build.
    print 'Generate config.h'
    config_h = join(dirname(__file__), 'kivy', 'graphics', 'config.h')
    with open(config_h, 'w') as fd:
        fd.write('// Autogenerated file for Kivy C configuration\n')
        for k, v in c_options.iteritems():
            # one C preprocessor define per build option
            fd.write('#define __%s %d\n' % (k.upper(), int(v)))
    print 'Generate config.pxi'
    config_pxi = join(dirname(__file__), 'kivy', 'graphics', 'config.pxi')
    with open(config_pxi, 'w') as fd:
        fd.write('# Autogenerated file for Kivy Cython configuration\n')
        for k, v in c_options.iteritems():
            # one Cython compile-time DEF per build option
            fd.write('DEF %s = %d\n' % (k.upper(), int(v)))
    build_ext.build_extensions(self)
def build_extensions(self):
    # Generate the C / Cython / Python configuration files, mirror them
    # into the build and source trees, then run the normal build.

    # build files: destination tuples relative to the package roots
    config_h_fn = ("graphics", "config.h")
    config_pxi_fn = ("graphics", "config.pxi")
    config_py_fn = ("setupconfig.py",)

    # generate headers: one C header, one Cython include, one Python module
    config_h = "// Autogenerated file for Kivy C configuration\n"
    config_h += "#define __PY3 {0}\n".format(int(PY3))
    config_pxi = "# Autogenerated file for Kivy Cython configuration\n"
    config_pxi += "DEF PY3 = {0}\n".format(int(PY3))
    config_py = "# Autogenerated file for Kivy configuration\n"
    config_py += "PY3 = {0}\n".format(int(PY3))
    config_py += "CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n".format(
        repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
    config_py += "CYTHON_BAD = {0}\n".format(
        repr(", ".join(map(str, CYTHON_UNSUPPORTED))))

    # generate content: every c_option is normalised to 0/1 and mirrored
    # into all three generated files
    print("Build configuration is:")
    for opt, value in c_options.items():
        value = int(bool(value))
        print(" * {0} = {1}".format(opt, value))
        opt = opt.upper()
        config_h += "#define __{0} {1}\n".format(opt, value)
        config_pxi += "DEF {0} = {1}\n".format(opt, value)
        config_py += "{0} = {1}\n".format(opt, value)
    debug = bool(self.debug)
    print(" * debug = {0}".format(debug))
    config_h += "#if __USE_GLEW && defined(_WIN32)\n# define GLEW_BUILD\n#endif"
    config_pxi += "DEF DEBUG = {0}\n".format(debug)
    config_py += "DEBUG = {0}\n".format(debug)

    # write each file into the build tree (and mirror it into the source
    # tree when the two differ); update_if_changed avoids touching files
    # whose content is unchanged, preventing needless rebuilds
    for fn, content in ((config_h_fn, config_h),
                        (config_pxi_fn, config_pxi),
                        (config_py_fn, config_py)):
        build_fn = expand(build_path, *fn)
        if self.update_if_changed(build_fn, content):
            print("Updated {}".format(build_fn))
        src_fn = expand(src_path, *fn)
        if src_fn != build_fn and self.update_if_changed(src_fn, content):
            print("Updated {}".format(src_fn))

    c = self.compiler.compiler_type
    print("Detected compiler is {}".format(c))
    if c != "msvc":
        # non-MSVC toolchains need libm linked explicitly
        for e in self.extensions:
            e.extra_link_args += ["-lm"]
    build_ext.build_extensions(self)
def build_extensions(self):
    # Point every extension at the bundled FreeTDS (on Windows) or the
    # system sybdb library (elsewhere), then run the normal build.
    global library_dirs, include_dirs
    if WINDOWS:
        # Detect the compiler so we can specify the correct command line
        # switches and libraries
        from distutils.cygwinccompiler import Mingw32CCompiler
        extra_cc_args = []
        # Distutils bug: self.compiler can be a string or a CCompiler
        # subclass instance, see http://bugs.python.org/issue6377
        if isinstance(self.compiler, str):
            # NOTE(review): this branch leaves `freetds_dir` and
            # `libraries` unbound, so the FREETDS join below would raise
            # NameError if it is ever taken — verify it is unreachable.
            compiler = self.compiler
        elif isinstance(self.compiler, Mingw32CCompiler):
            compiler = 'mingw32'
            freetds_dir = 'ming'
            extra_cc_args = [
                '-Wl,-allow-multiple-definition',
                '-Wl,-subsystem,windows-mthreads',
                '-mwindows',
                '-Wl,--strip-all'
            ]
            libraries = [
                'libiconv', 'iconv',
                'sybdb',
                'ws2_32', 'wsock32', 'kernel32',
            ]
        else:
            compiler = 'msvc'
            freetds_dir = 'vs2008'
            libraries = [
                'db-lib', 'tds',
                'ws2_32', 'wsock32', 'kernel32', 'shell32',
            ]
        # Bundled FreeTDS build matching the compiler family and bitness.
        FREETDS = osp.join(ROOT, 'freetds',
                           '{0}_{1}'.format(freetds_dir, BITNESS))
        for e in self.extensions:
            e.extra_compile_args.extend(extra_cc_args)
            e.libraries.extend(libraries)
            e.include_dirs.append(osp.join(FREETDS, 'include'))
            e.library_dirs.append(osp.join(FREETDS, 'lib'))
    else:
        libraries = [
            "sybdb"
        ]  # on Mandriva you may have to change it to sybdb_mssql
        for e in self.extensions:
            e.libraries.extend(libraries)
    _build_ext.build_extensions(self)
def build_extensions(self):
    # Python 2 code (print statement).
    # Wire FFTW + OpenMP into every extension, per compiler family.
    if self.compiler.compiler_type == 'msvc':
        # Pointer size distinguishes 32- vs 64-bit builds so the right
        # prebuilt FFTW import library directory can be selected.
        bitness = struct.calcsize("P") * 8
        print "Compiling for", bitness, "bit environment"
        for e in self.extensions:
            e.include_dirs = [numpy.get_include(), os.path.join('fftw_wrapper'),]
            e.extra_compile_args = ['/openmp']
            e.libraries = [os.path.join({32 : 'fftw_32', 64: 'fftw_64'}[bitness], 'libfftw3-3')]
    else:
        # gcc/clang path: OpenMP plus the system (threaded) FFTW libraries.
        for e in self.extensions:
            e.include_dirs = [numpy.get_include(), os.path.join('fftw_wrapper'),]
            e.extra_compile_args = ['-fopenmp']
            e.extra_link_args = ['-fopenmp', '-lm', '-lfftw3', '-lfftw3_threads']
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build extensions, first patching the compiler when it is mingw32."""
    compiler = self.compiler
    if compiler.compiler_type == 'mingw32':
        customize_mingw(compiler)
    return build_ext_c.build_extensions(self)
def build_extensions(self):
    """Accept .cu sources, then run the stock build.

    (customize_compiler_for_nvcc is deliberately not called here:
    per the original note, MSVCCompiler does not support that patching.)
    """
    self.compiler.src_extensions.append('.cu')
    build_ext.build_extensions(self)
def build_extensions(self):
    # Enable OpenMP (and C++11) per platform/compiler combination.
    #
    # This code is adapted from
    # scikit-learn/sklearn/_build_utils/openmp_helpers.py
    # (version last updated on 13 Nov 2019; 9876f74)
    # See https://github.com/scikit-learn/scikit-learn and
    # https://scikit-learn.org/.
    #
    # Identify the compiler: prefer the actual driver name (e.g. 'gcc'),
    # falling back to the compiler class name (e.g. 'MSVCCompiler').
    if hasattr(self.compiler, 'compiler'):
        compiler = self.compiler.compiler[0]
    else:
        compiler = self.compiler.__class__.__name__
    if sys.platform == "win32" and ('icc' in compiler or 'icl' in compiler):
        # Intel compiler on Windows: /Q-prefixed option spellings.
        for e in self.extensions:
            e.extra_compile_args += ['/Qopenmp', '/Qstd=c++11']
            e.extra_link_args += ['/Qopenmp']
    elif sys.platform == "win32":
        # MSVC: /openmp.
        for e in self.extensions:
            e.extra_compile_args += ['/openmp']
            e.extra_link_args += ['/openmp']
    elif sys.platform == "darwin" and ('icc' in compiler or 'icl' in compiler):
        # Intel compiler on macOS.
        for e in self.extensions:
            e.extra_compile_args += ['-openmp', '-std=c++11']
            e.extra_link_args += ['-openmp']
    elif sys.platform == "darwin":  # and 'openmp' in os.getenv('CPPFLAGS', ''):
        # -fopenmp can't be passed as compile flag when using Apple-clang.
        # OpenMP support has to be enabled during preprocessing.
        #
        # For example, our macOS wheel build jobs use the following environment
        # variables to build with Apple-clang and the brew installed "libomp":
        #
        # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp"
        # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
        # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
        # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib
        #                          -L/usr/local/opt/libomp/lib -lomp"
        for e in self.extensions:
            e.extra_compile_args += ['-std=c++11']
        pass
    elif sys.platform == "linux":
        # Default flag for GCC and clang:
        for e in self.extensions:
            e.extra_compile_args += ['-fopenmp', '-std=c++11']
            e.extra_link_args += ['-fopenmp']
    else:
        # Unknown platform: leave all flags untouched.
        pass
    # Old version:
    # c = self.compiler.compiler_type
    # if c == "msvc":
    #     for e in self.extensions:
    #         e.extra_compile_args += "/openmp"
    # elif c == "mingw32":
    #     for e in self.extensions:
    #         e.extra_compile_args += "-fopenmp"
    #         e.extra_link_args += "-fopenmp"
    # elif c == "unix":
    #     # Well... gcc/clang has -fopenmp,
    #     # icc has -openmp, oracle has -xopenmp, etc.
    #     # The user should specify CXXFLAGS and LDFLAGS herself, I think.
    #     pass
    build_ext.build_extensions(self)
def build_extensions(self):
    """
    Specifies compiler and linker flags depending on the compiler.

    .. warning::

        DO NOT USE the '-march=native' flag. With it the compiler
        optimizes instructions for the build machine's CPU and the
        executable is not backward compatible to older CPUs; installing
        the binary wheel on other machines then crashes with
        'illegal instructions (core dumped).'  A backward-compatible
        alternative optimization flag is '-mtune=native'.
    """
    # Get compiler type. This is "unix" (linux, mac) or "msvc" (windows)
    compiler_type = self.compiler.compiler_type
    # Initialize flags
    extra_compile_args = []
    extra_link_args = []
    if compiler_type == 'msvc':
        # This is Microsoft Windows Visual C++ compiler
        msvc_compile_args = ['/O2', '/Wall', '/openmp']
        msvc_link_args = []
        msvc_has_openmp_flag = check_compiler_has_flag(
            self.compiler, msvc_compile_args, msvc_link_args)
        if msvc_has_openmp_flag:
            # Add flags
            extra_compile_args += msvc_compile_args
            extra_link_args += msvc_link_args
        else:
            # It does not seem msvc accept -fopenmp flag.
            raise RuntimeError(textwrap.dedent(
                """
                OpenMP does not seem to be available on %s compiler.
                """ % compiler_type))
    else:
        # The compile_type is 'unix'. This is either linux or mac.
        # We add common flags that work both for gcc and mac's clang
        extra_compile_args += ['-O3', '-fno-stack-protector', '-Wall']
        # The option '-Wl, ..' will send arguments to the linker. Here,
        # '--strip-all' will remove all symbols from the shared library.
        if not debug_mode:
            # NOTE(review): '-Wl, --strip-all' contains a space, so it is
            # passed as one malformed token; the linker option is usually
            # spelled '-Wl,--strip-all' and belongs in the *link* args —
            # verify against the build logs before relying on stripping.
            extra_compile_args += ['-g0', '-Wl, --strip-all']
        # Assume compiler is gcc (we do not know yet). Check if the
        # compiler accepts '-fopenmp' flag. Note: clang in mac does not
        # accept this flag alone, but gcc does.
        gcc_compile_args = ['-fopenmp']
        gcc_link_args = ['-fopenmp']
        gcc_has_openmp_flag = check_compiler_has_flag(
            self.compiler, gcc_compile_args, gcc_link_args)
        if gcc_has_openmp_flag:
            # Assuming this is gcc. Add '-fopenmp' safely.
            extra_compile_args += gcc_compile_args
            extra_link_args += gcc_link_args
        else:
            # Assume compiler is clang (we do not know yet). Check if
            # -fopenmp can be passed through preprocessor. This is how
            # clang compiler accepts -fopenmp.
            clang_compile_args = ['-Xpreprocessor', '-fopenmp']
            clang_link_args = ['-Xpreprocessor', '-fopenmp', '-lomp']
            clang_has_openmp_flag = check_compiler_has_flag(
                self.compiler, clang_compile_args, clang_link_args)
            if clang_has_openmp_flag:
                # Assuming this is mac's clang. Add '-fopenmp' through
                # the preprocessor.
                extra_compile_args += clang_compile_args
                extra_link_args += clang_link_args
            else:
                # It doesn't seem either gcc or clang accept -fopenmp flag.
                raise RuntimeError(textwrap.dedent(
                    """
                    OpenMP does not seem to be available on %s compiler.
                    """ % compiler_type))
    # Modify compiler flags for cuda
    if use_cuda:
        # Compile flags for nvcc
        if sys.platform == 'win32':
            extra_compile_args_nvcc = ['/Ox']
        else:
            extra_compile_args_nvcc = ['-arch=sm_35', '--ptxas-options=-v',
                                       '-c', '--compiler-options', '-fPIC',
                                       '-O3', '--verbose', '--shared']
        # '--strip-all' removes all symbols from the shared library.
        if debug_mode:
            extra_compile_args_nvcc += ['-g', '-G']
        else:
            extra_compile_args_nvcc += ['--linker-options', '--strip-all']
        # Redefine extra_compile_args list to be a dictionary keyed by
        # which compiler (host vs nvcc) should receive the flags.
        extra_compile_args = {
            'not_nvcc': extra_compile_args,
            'nvcc': extra_compile_args_nvcc
        }
    # Add the flags to all extensions
    for ext in self.extensions:
        ext.extra_compile_args = extra_compile_args
        ext.extra_link_args = extra_link_args
    # Parallel compilation (can also be set via build_ext -j or --parallel)
    # Note: parallel build fails in windows since object files are accessed
    # by race condition.
    # if sys.platform != 'win32':
    #     self.parallel = multiprocessing.cpu_count()
    # Modify compiler for cuda
    if use_cuda:
        cuda = locate_cuda()
        if sys.platform == 'win32':
            customize_windows_compiler_for_nvcc(self.compiler, cuda)
        else:
            customize_unix_compiler_for_nvcc(self.compiler, cuda)
    # Remove warning: command line option '-Wstrict-prototypes' is valid
    # for C/ObjC but not for C++
    try:
        if '-Wstrict-prototypes' in self.compiler.compiler_so:
            self.compiler.compiler_so.remove('-Wstrict-prototypes')
    except (AttributeError, ValueError):
        pass
    # Call parent class to build
    build_ext.build_extensions(self)
def build_extensions(self):
    """No customization required; delegate to the stock implementation."""
    build_ext.build_extensions(self)
def build_extensions(self):
    # Monkey-patch the distutils compiler so .cu files are routed through
    # nvcc (with the appropriate flag sets) while everything else keeps
    # using the host compiler, then run the normal build.

    # Register .cu and .cuh as valid source extensions.
    self.compiler.src_extensions += [".cu", ".cuh"]
    # Save the original _compile method for later.
    if self.compiler.compiler_type == "msvc":
        self.compiler._cpp_extensions += [".cu", ".cuh"]
        original_compile = self.compiler.compile
        original_spawn = self.compiler.spawn
    else:
        original_compile = self.compiler._compile

    def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Per-file hook: swap the compiler executable for nvcc when the
        # source is a CUDA file, restoring it afterwards.
        # Copy before we make any modifications.
        cflags = copy.deepcopy(extra_postargs)
        try:
            original_compiler = self.compiler.compiler_so
            if _is_cuda_file(src):
                nvcc = _join_cuda_home("bin", "nvcc")
                if not isinstance(nvcc, list):
                    nvcc = [nvcc]
                self.compiler.set_executable("compiler_so", nvcc)
                if isinstance(cflags, dict):
                    cflags = cflags["nvcc"]
                cflags = (COMMON_NVCC_FLAGS +
                          ["--compiler-options", "'-fPIC'"] +
                          cflags + COMPUTE_CAPABILITY_ARGS)
            elif isinstance(cflags, dict):
                cflags = cflags["cxx"]
            # NVCC does not allow multiple -std to be passed, so we avoid
            # overriding the option if the user explicitly passed it.
            if not any(flag.startswith("-std=") for flag in cflags):
                cflags.append("-std=c++11")
            original_compile(obj, src, ext, cc_args, cflags, pp_opts)
        finally:
            # Put the original compiler back in place.
            self.compiler.set_executable("compiler_so", original_compiler)

    def win_wrap_compile(
        sources,
        output_dir=None,
        macros=None,
        include_dirs=None,
        debug=0,
        extra_preargs=None,
        extra_postargs=None,
        depends=None,
    ):
        # On MSVC there is no per-file _compile hook, so we intercept the
        # spawn of the compiler process instead and rewrite the command
        # line for CUDA sources.
        cflags = copy.deepcopy(extra_postargs)
        extra_postargs = None

        def spawn(cmd, cflags):
            # Using regex to match src, obj and include files
            src_regex = re.compile("/T(p|c)(.*)")
            src_list = [
                m.group(2) for m in (src_regex.match(elem) for elem in cmd) if m
            ]
            obj_regex = re.compile("/Fo(.*)")
            obj_list = [
                m.group(1) for m in (obj_regex.match(elem) for elem in cmd) if m
            ]
            include_regex = re.compile(r"((\-|\/)I.*)")
            include_list = [
                m.group(1)
                for m in (include_regex.match(elem) for elem in cmd)
                if m
            ]
            if len(src_list) >= 1 and len(obj_list) >= 1:
                src = src_list[0]
                obj = obj_list[0]
                if _is_cuda_file(src):
                    # Rebuild the command line as an nvcc invocation.
                    nvcc = _join_cuda_home("bin", "nvcc")
                    if isinstance(cflags, dict):
                        cflags = cflags["nvcc"]
                    elif not isinstance(cflags, list):
                        cflags = []
                    cflags = COMMON_NVCC_FLAGS + cflags + COMPUTE_CAPABILITY_ARGS
                    # Host-compiler flags must be forwarded via -Xcompiler.
                    for flag in COMMON_MSVC_FLAGS:
                        cflags = ["-Xcompiler", flag] + cflags
                    # Translate distutils macro tuples into nvcc options.
                    # NOTE(review): iterating `macros` assumes distutils
                    # always passes a sequence here (its default is None)
                    # — confirm for the distutils version in use.
                    for macro in macros:
                        if len(macro) == 2:
                            if macro[1] == None:
                                cflags += ["--define-macro", macro[0]]
                            else:
                                cflags += [
                                    "--define-macro",
                                    "{}={}".format(macro[0], macro[1])
                                ]
                        elif len(macro) == 1:
                            cflags += ["--undefine-macro", macro[0]]
                    cmd = [nvcc, "-c", src, "-o", obj
                           ] + include_list + cflags
                elif isinstance(cflags, dict):
                    cflags = COMMON_MSVC_FLAGS  # + self.cflags['cxx']
                    cmd += cflags
                elif isinstance(cflags, list):
                    cflags = COMMON_MSVC_FLAGS + cflags
                    cmd += cflags
            return original_spawn(cmd)

        try:
            self.compiler.spawn = lambda cmd: spawn(cmd, cflags)
            return original_compile(
                sources,
                output_dir,
                macros,
                include_dirs,
                debug,
                extra_preargs,
                extra_postargs,
                depends,
            )
        finally:
            self.compiler.spawn = original_spawn

    # Monkey-patch the _compile method.
    if self.compiler.compiler_type == "msvc":
        self.compiler.compile = win_wrap_compile
    else:
        self.compiler._compile = unix_wrap_compile
    build_ext.build_extensions(self)
def build_extensions(self):
    """Apply distutils' environment-driven compiler tweaks, then build."""
    customize_compiler(self.compiler)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Register the platform-specific st_dsne extension, then build.

    Bug fix: the macOS SDK-header selection only compared the *minor*
    version component, so macOS 11+ (e.g. "11.0" -> minor == 0) wrongly
    fell through to the pre-10.10 header location; any major version
    above 10 is now treated like 10.15+.  The unused `patch` component
    is no longer parsed.
    """
    print("Running custom build_ext")
    numpy_incl = pkg_resources.resource_filename("numpy", "core/include")
    if sys.platform == "darwin":
        # Platform: Mac OS
        version, _, _ = platform.mac_ver()
        parts = version.split(".")
        major = int(parts[0])
        minor = int(parts[1]) if len(parts) > 1 else 0
        if major > 10 or minor >= 15:
            # Mac OS 10.15+, including macOS 11 and later
            extra_compile_args = [
                "-I/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/"
            ]
        elif minor >= 10:
            # Mac OS 10.10 - 10.14
            extra_compile_args = [
                "-I/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/Current/Headers"
            ]
        else:
            # Older than Mac OS 10.10
            extra_compile_args = [
                "-I/System/Library/Frameworks/vecLib.framework/Headers"
            ]
        ext_module = Extension(
            name="st_dsne",
            sources=[
                "dsne/includes/st_dsne/dsne.cpp",
                "dsne/st_dsne.pyx",
            ],
            include_dirs=["dsne/includes/st_dsne/"],
            extra_compile_args=extra_compile_args,
            extra_link_args=[
                "-Wl,-framework", "-Wl,Accelerate", "-lcblas"
            ],
            language="c++",
        )
        self.extensions.append(ext_module)
    else:
        # Platform: Linux
        extra_link_args = ["-lcblas"]
        ext_module = Extension(
            name="st_dsne",
            sources=[
                "dsne/includes/st_dsne/dsne.cpp",
                "dsne/st_dsne.pyx",
            ],
            include_dirs=["/usr/local/include", "dsne/includes/st_dsne/"],
            library_dirs=["/usr/local/lib"],
            extra_compile_args=["-msse2", "-O3", "-fPIC", "-w"],
            extra_link_args=extra_link_args,
            language="c++",
        )
        self.extensions.append(ext_module)
    # Drop the placeholder extension and expose numpy's headers.
    self.extensions = [
        ext for ext in self.extensions if ext.name != "__dummy__"
    ]
    for ext in self.extensions:
        if hasattr(ext, "include_dirs") and numpy_incl not in ext.include_dirs:
            ext.include_dirs.append(numpy_incl)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Give every extension the compiler-specific flags, then build.

    Bug fix: extensions previously shared the list object stored in the
    BUILD_ARGS table, so an in-place mutation of one extension's flags
    would corrupt the table and its siblings; each extension now
    receives its own copy.
    """
    compiler = self.compiler.compiler_type
    args = BUILD_ARGS[compiler]
    for ext in self.extensions:
        ext.extra_compile_args = list(args)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Cythonize the extension list, then run the stock build."""
    cythonized = my_cythonize(self.extensions)
    self.extensions = cythonized
    return build_ext.build_extensions(self)
def build_extensions(self):
    """Finish configuring the extension, patch in nvcc, and build."""
    # Resolve any lazily-specified extension settings first.
    fully_define_extension(self)
    # Teach the compiler how to route CUDA sources through nvcc.
    customize_compiler_for_nvcc(self.compiler)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Patch the compiler for nvcc support, then run the stock build."""
    customize_compiler_for_nvcc(self.compiler)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Patch the generated Cython modules, then run the normal build."""
    patchCythonModules(self.build_lib)
    Cython_build_ext.build_extensions(self)
def build_extensions(self):
    """Build extensions with mixed C++/CUDA sources.

    Monkey-patches the distutils compiler so that ``.cu``/``.cuh`` files
    are routed to nvcc while ordinary sources keep using the host
    compiler, then delegates to ``build_ext.build_extensions``.  Two
    wrappers exist: ``unix_wrap_compile`` (per-file ``_compile`` hook)
    and ``win_wrap_compile`` (whole-batch ``compile`` hook for MSVC,
    which intercepts ``spawn`` to rewrite the command line).
    """
    # Register .cu and .cuh as valid source extensions.
    self.compiler.src_extensions += ['.cu', '.cuh']
    # Save the original _compile method for later.
    if self.compiler.compiler_type == 'msvc':
        # MSVC has no per-file _compile hook, so we must wrap the batch
        # compile() entry point and its spawn() instead.
        self.compiler._cpp_extensions += ['.cu', '.cuh']
        original_compile = self.compiler.compile
        original_spawn = self.compiler.spawn
    else:
        original_compile = self.compiler._compile

    def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Per-file compile hook for unix-like compilers: swap in nvcc for
        # CUDA sources, restore the host compiler afterwards.
        # Copy before we make any modifications.
        cflags = copy.deepcopy(extra_postargs)
        try:
            original_compiler = self.compiler.compiler_so
            if _is_cuda_file(src):
                nvcc = _join_cuda_home('bin', 'nvcc')
                if not isinstance(nvcc, list):
                    nvcc = [nvcc]
                # Temporarily make nvcc the "compiler_so" executable.
                self.compiler.set_executable('compiler_so', nvcc)
                if isinstance(cflags, dict):
                    cflags = cflags['nvcc']
                cflags = COMMON_NVCC_FLAGS + [
                    '--compiler-options', "'-fPIC'"
                ] + cflags + COMPUTE_CAPABILITY_ARGS
            elif isinstance(cflags, dict):
                cflags = cflags['cxx']
            # NVCC does not allow multiple -std to be passed, so we avoid
            # overriding the option if the user explicitly passed it.
            if not any(flag.startswith('-std=') for flag in cflags):
                cflags.append('-std=c++11')
            original_compile(obj, src, ext, cc_args, cflags, pp_opts)
        finally:
            # Put the original compiler back in place.
            self.compiler.set_executable('compiler_so', original_compiler)

    def win_wrap_compile(sources,
                         output_dir=None,
                         macros=None,
                         include_dirs=None,
                         debug=0,
                         extra_preargs=None,
                         extra_postargs=None,
                         depends=None):
        # MSVC batch-compile hook: intercept spawn() so each individual
        # compiler invocation can be rewritten to nvcc for CUDA sources.
        cflags = copy.deepcopy(extra_postargs)
        extra_postargs = None

        def spawn(cmd, cflags):
            # Using regex to match src, obj and include files
            src_regex = re.compile('/T(p|c)(.*)')
            src_list = [
                m.group(2) for m in (src_regex.match(elem) for elem in cmd)
                if m
            ]
            obj_regex = re.compile('/Fo(.*)')
            obj_list = [
                m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
                if m
            ]
            include_regex = re.compile(r'((\-|\/)I.*)')
            include_list = [
                m.group(1)
                for m in (include_regex.match(elem) for elem in cmd) if m
            ]
            if len(src_list) >= 1 and len(obj_list) >= 1:
                src = src_list[0]
                obj = obj_list[0]
                if _is_cuda_file(src):
                    # Replace the whole MSVC command with an nvcc one;
                    # host-compiler flags go through -Xcompiler.
                    nvcc = _join_cuda_home('bin', 'nvcc')
                    if isinstance(cflags, dict):
                        cflags = cflags['nvcc']
                    elif not isinstance(cflags, list):
                        cflags = []
                    cflags = COMMON_NVCC_FLAGS + cflags + COMPUTE_CAPABILITY_ARGS
                    for flag in COMMON_MSVC_FLAGS:
                        cflags = ['-Xcompiler', flag] + cflags
                    cmd = [nvcc, '-c', src, '-o', obj
                           ] + include_list + cflags
                elif isinstance(cflags, dict):
                    cflags = COMMON_MSVC_FLAGS  #+ self.cflags['cxx']
                    cmd += cflags
                elif isinstance(cflags, list):
                    cflags = COMMON_MSVC_FLAGS + cflags
                    cmd += cflags
            return original_spawn(cmd)

        try:
            # Route every spawn through our wrapper for this batch only.
            self.compiler.spawn = lambda cmd: spawn(cmd, cflags)
            return original_compile(sources, output_dir, macros,
                                    include_dirs, debug, extra_preargs,
                                    extra_postargs, depends)
        finally:
            self.compiler.spawn = original_spawn

    # Monkey-patch the _compile method.
    if self.compiler.compiler_type == 'msvc':
        self.compiler.compile = win_wrap_compile
    else:
        self.compiler._compile = unix_wrap_compile
    build_ext.build_extensions(self)
def build_extensions(self):
    """Delegate to the stock builder unless extensions are disabled.

    ``disable_ext`` is a project-level distribution option; when set,
    compilation is skipped entirely.
    """
    if not self.distribution.disable_ext:
        return build_ext.build_extensions(self)
def build_extensions(self):
    """Apply the shared build options to this command, then build normally."""
    build_ext_options.build_options(self)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Locate a CBLAS library and the findblas header, wire them into every
    extension, then run the normal build.

    Steps, in order:
      1. Use ``findblas.find_blas()`` to locate a BLAS library and its
         headers (skipped on ReadTheDocs, where a mock is compiled instead).
      2. Locate ``findblas.h`` across the various places different
         installers (pip/setuptools, distutils, PEP 518 overlays, or pip's
         own file listing) may have put it.
      3. Translate the discovered header file name into preprocessor flag
         macros, warning when no reliable CBLAS prototypes are available.
      4. Add link arguments, macros and include dirs to each extension.

    Raises ``ValueError`` when no usable CBLAS library or header can be
    found.  NOTE(review): ``platform`` here is used as a string
    (``platform[:3]``), so it is presumably ``sys.platform`` imported under
    that name rather than the ``platform`` module — confirm at file top.
    """
    ## Lookup blas files and headers first
    nocblas_err_msg = "\n\nNo CBLAS library found - please install one with e.g. "
    nocblas_err_msg += "'sudo apt-get install libopenblas-dev' (or 'intel-mkl-full') (Debian/Ubuntu)"
    nocblas_err_msg += ", 'conda install mkl-devel' (Linux / Mac)"
    nocblas_err_msg += ", or 'pip install mkl-devel' (Windows / Linux / Mac)."
    from_rtd = os.environ.get('READTHEDOCS') == 'True'
    if not from_rtd:
        blas_path, blas_file, incl_path, incl_file, flags = findblas.find_blas(
        )
        if (blas_file is None) or (blas_path is None):
            raise ValueError(nocblas_err_msg)
        elif blas_file == "mkl_rt.dll":
            # A DLL alone cannot be linked against with MSVC; .lib import
            # libraries are required.
            txt = "Found MKL library at:\n" + os.path.join(
                blas_path, blas_file)
            txt += "\nHowever, it is missing .lib files - please install them with 'pip install mkl-devel'."
            raise ValueError(txt)
        elif bool(re.search(r"\.dll$", blas_file)):
            txt = "Found BLAS library at:\n" + os.path.join(
                blas_path, blas_file)
            txt += "\nBut .lib files are missing! Please reinstall it (e.g. 'pip install mkl-devel')."
            raise ValueError(txt)
        else:
            if platform[:3] != "win":
                print("Installation: Using BLAS library found in:\n" +
                      os.path.join(blas_path, blas_file) + "\n\n")
    else:
        # ReadTheDocs build: no real BLAS; compile against a mock below.
        flags = ['_FOR_RTD']
        blas_path, blas_file, incl_path, incl_file = [None] * 4
    ## if no CBLAS and no functions are present, there will be no prototypes for the cblas API
    if "NO_CBLAS" in flags:
        raise ValueError(nocblas_err_msg)
    ## Add findblas' header
    ## if installing with pip or setuptools, will be here (this is the ideal case)
    if os.path.exists(
            re.sub(r"__init__\.py$", "findblas.h", findblas.__file__)):
        finblas_head_fold = re.sub(r"__init__\.py$", "", findblas.__file__)
    ## if installing with distutils, will be placed here (this should ideally not happen)
    elif os.path.exists(os.path.join(sys.prefix, "include", "findblas.h")):
        finblas_head_fold = os.path.join(sys.prefix, "include")
    elif os.path.exists(os.path.join(sys.prefix, "findblas.h")):
        finblas_head_fold = sys.prefix
    ## if on a PEP518 environment, might be located elsewhere
    else:
        candidate_paths = [sys.prefix]
        try:
            candidate_paths.append(os.environ['PYTHONPATH'])
        except:
            # PYTHONPATH not set; nothing to add.
            pass
        if platform[:3] == "win":
            candidate_paths += os.environ['PATH'].split(";")
        else:
            candidate_paths += os.environ['PATH'].split(":")
        for path in candidate_paths:
            # PEP 518 build isolation puts packages under an "overlay" dir;
            # strip everything after that component and probe the usual
            # install layouts beneath it.
            if bool(re.search(r"[Oo]verlay", path)):
                clean_path = re.sub(r"^(.*[Oo]verlay).*$", r"\1", path)
                if os.path.exists(
                        os.path.join(clean_path, "include", "findblas.h")):
                    finblas_head_fold = os.path.join(clean_path, "include")
                    break
                elif os.path.exists(os.path.join(clean_path, "findblas.h")):
                    finblas_head_fold = clean_path
                    break
                elif os.path.exists(
                        os.path.join(clean_path, "site-packages", "findblas",
                                     "findblas.h")):
                    finblas_head_fold = os.path.join(
                        clean_path, "site-packages", "findblas")
                    break
                elif os.path.exists(
                        os.path.join(clean_path, "Lib", "site-packages",
                                     "findblas", "findblas.h")):
                    finblas_head_fold = os.path.join(
                        clean_path, "Lib", "site-packages", "findblas")
                    break
                elif os.path.exists(
                        os.path.join(clean_path,
                                     "lib", "site-packages", "findblas",
                                     "findblas.h")):
                    finblas_head_fold = os.path.join(
                        clean_path, "lib", "site-packages", "findblas")
                    break
        ## if still not found, try to get it from pip itself
        # (for-else: runs only when the loop above never hit a `break`)
        else:
            import pip
            import io
            from contextlib import redirect_stdout
            # Capture `pip show -f findblas` output to find the header's
            # install location.
            pip_outp = io.StringIO()
            with redirect_stdout(pip_outp):
                pip.main(['show', '-f', 'findblas'])
            pip_outp = pip_outp.getvalue()
            pip_outp = pip_outp.split("\n")
            for ln in pip_outp:
                if bool(re.search(r"^Location", ln)):
                    files_root = re.sub(r"^Location:\s+", "", ln)
                    break
            for ln in pip_outp:
                if bool(re.search(r"findblas\.h$", ln)):
                    finblas_head_fold = os.path.join(
                        files_root,
                        re.sub(r"^(.*)[/\\]*findblas\.h$", r"\1", ln))
                    break
            ## if the header file doesn't exist, shall raise en error
            # (for-else on the listing scan above)
            else:
                raise ValueError(
                    "Could not find header file from 'findblas' - please try reinstalling with 'pip install --force findblas'"
                )
    ## Pass extra flags for the header
    warning_msg = "No CBLAS headers were found - function propotypes might be unreliable."
    mkl_err_msg = "Missing MKL CBLAS headers, please reinstall with e.g. 'conda install mkl-include' or 'pip install mkl-include'."
    gsl_err_msg = "Missing GSL CBLAS headers, please reinstall with e.g. 'conda install gsl'."
    # Map the discovered header file name to the macro the C sources expect.
    if incl_file == "mkl_cblas.h":
        flags.append("MKL_OWN_INCL_CBLAS")
    elif incl_file == "mkl_blas.h":
        raise ValueError(mkl_err_msg)
    elif incl_file == "cblas-openblas.h":
        flags.append("OPENBLAS_OWN_INCL")
    elif incl_file == "gsl_cblas.h":
        flags.append("GSL_OWN_INCL_CBLAS")
    elif incl_file == "gsl_blas.h":
        raise ValueError(gsl_err_msg)
    elif incl_file == "INCL_CBLAS":
        flags.append("INCL_CBLAS")
    elif incl_file == "blas.h":
        # Plain BLAS header only: prototypes may be unreliable.
        flags.append("INCL_BLAS")
        warnings.warn(warning_msg)
    elif (incl_path is None) or (incl_file is None):
        flags.append("NO_CBLAS_HEADER")
        warnings.warn(warning_msg)
    else:
        pass
    ## Now add them to the extension
    for e in self.extensions:
        if not from_rtd:
            if self.compiler.compiler_type == 'msvc':  # visual studio
                e.extra_link_args += [os.path.join(blas_path, blas_file)]
            else:  # everything else which cares about following standards
                if platform[:3] != "dar":
                    e.extra_link_args += [
                        "-L" + blas_path, "-l:" + blas_file
                    ]
                else:
                    # macOS ld has no -l: syntax; strip "lib" prefix and
                    # the file extension to form a plain -l name.
                    blas_shortened = re.sub(r"^lib", "", blas_file)
                    blas_shortened = re.sub(r"\.[A-Za-z]+$", "",
                                            blas_shortened)
                    e.extra_link_args += [
                        "-L" + blas_path, "-l" + blas_shortened
                    ]
                if bool(re.search(r"\.a$", blas_file)):
                    # Static library: also link the CBLAS interface libs.
                    if (bool(re.search(r"gsl", blas_file))):
                        e.extra_link_args += ["-lgslcblas"]
                    else:
                        e.extra_link_args += ["-lcblas", "-lblas"]
                else:
                    # Shared library: embed an rpath (comma form on macOS,
                    # equals form elsewhere).
                    if platform[:3] == "dar":
                        e.extra_link_args += ["-Wl,-rpath," + blas_path]
                    else:
                        e.extra_link_args += ["-Wl,-rpath=" + blas_path]
        else:
            # ReadTheDocs: compile the mock instead of linking real BLAS.
            e.sources.append(os.path.join(finblas_head_fold, "rtd_mock.c"))
        e.define_macros += [(f, None) for f in flags]
        if incl_path is not None:
            e.include_dirs.append(incl_path)
        e.include_dirs.append(finblas_head_fold)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Refresh the extension definitions, then run the standard build."""
    self._update_extensions()
    build_ext.build_extensions(self)