def build_extensions(self):
    """Attach version macros, C++ flags, and header search paths, then build."""
    compiler_type = self.compiler.compiler_type
    compile_opts = self.c_opts.get(compiler_type, [])
    if compiler_type == 'unix':
        compile_opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
        compile_opts.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, '-fvisibility=hidden'):
            compile_opts.append('-fvisibility=hidden')
    elif compiler_type == 'msvc':
        compile_opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
    # Import here rather than at module scope: pip may have installed
    # numpy/pybind11 only as part of executing this very setup script.
    import pybind11
    import numpy
    header_dirs = [
        # Path to pybind11 headers
        pybind11.get_include(),
        pybind11.get_include(True),
        # Path to numpy headers
        numpy.get_include(),
    ]
    link_opts = self.link_opts.get(compiler_type, [])
    for extension in self.extensions:
        extension.extra_compile_args.extend(compile_opts)
        extension.extra_link_args.extend(link_opts)
        extension.include_dirs.extend(header_dirs)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Make numpy's C headers visible to every extension, then build."""
    from numpy.distutils.misc_util import get_numpy_include_dirs
    for extension in self.extensions:
        extension.include_dirs.extend(get_numpy_include_dirs())
    build_ext.build_extensions(self)
def build_extensions(self):
    """Optionally enable OpenMP for every extension, then run the build.

    ``self.omp`` must be '0'/'1' (or a bool); anything else raises ValueError.
    Fixes: typo in the error message ("Mut" -> "Must"); reuses the already
    computed compiler type ``c`` instead of re-reading the attribute.
    """
    c = self.compiler.compiler_type
    if self.omp not in ('0', '1', True, False):
        raise ValueError("Invalid omp argument. Must be '0' or '1'.")
    self.omp = int(self.omp)
    if self.omp:
        # Print the banner only once even if called repeatedly.
        if not hasattr(self, "_printed_omp_message"):
            self._printed_omp_message = True
            print("\n#################################")
            print("# Compiling with OpenMP support #")
            print("#################################\n")
        # More portable to pass -fopenmp to linker.
        # self.libraries += ['gomp']
        if c == 'msvc':
            openmpflag = '/openmp'
            compileflags = COMPILE_FLAGS_MSVC
        else:
            openmpflag = '-fopenmp'
            compileflags = COMPILE_FLAGS
        for e in self.extensions:
            # De-duplicate base flags, then ensure the OpenMP flag is present
            # exactly once in both compile and link argument lists.
            e.extra_compile_args = list(set(e.extra_compile_args).union(compileflags))
            if openmpflag not in e.extra_compile_args:
                e.extra_compile_args += [openmpflag]
            if openmpflag not in e.extra_link_args:
                e.extra_link_args += [openmpflag]
    build_ext_.build_extensions(self)
def build_extensions(self):
    """Apply per-compiler flags and optional numpy support, then build.

    Fix: ``dict.has_key()`` was removed in Python 3 — replaced with the
    ``in`` operator (behavior-identical on Python 2 as well).
    """
    # set compiler flags
    c = self.compiler.compiler_type
    if c in self.copt:
        for e in self.extensions:
            e.extra_compile_args = self.copt[c]
    if c in self.lopt:
        for e in self.extensions:
            e.extra_link_args = self.lopt[c]
    # handle numpy: enable the fast-array macros only if numpy imports cleanly
    if not disabled_numpy:
        try:
            import numpy
            jpypeLib.define_macros.append(('HAVE_NUMPY', 1))
            jpypeLib.include_dirs.append(numpy.get_include())
            warnings.warn("Turned ON Numpy support for fast Java array access",
                          FeatureNotice)
        except ImportError:
            # numpy absent: build without the optional support, silently
            pass
    else:
        warnings.warn("Turned OFF Numpy support for fast Java array access",
                      FeatureNotice)
    # has to be last call
    build_ext.build_extensions(self)
def build_extensions(self):
    """Sanitize the compiler command lines, apply per-compiler flags, build."""
    try:
        # Drop any blacklisted arguments from the base compile commands.
        self.compiler.compiler = [
            a for a in self.compiler.compiler if a not in self.ignored_arguments
        ]
        self.compiler.compiler_so = [
            a for a in self.compiler.compiler_so if a not in self.ignored_arguments
        ]
    except AttributeError:
        # This compiler class does not expose the expected attribute lists.
        pass
    ctype = self.compiler.compiler_type
    if ctype in self.compile_args:
        flags = self.compile_args[ctype]
        for extension in self.extensions:
            extension.extra_compile_args = flags
    if ctype in self.linker_args:
        flags = self.linker_args[ctype]
        for extension in self.extensions:
            extension.extra_link_args = flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Report the compiler in use, then apply OpenMP and platform flags."""
    ctype = self.compiler.compiler_type
    print("Compiling with %s (64bit=%s)" % (ctype, str(is_64bits)))
    # OpenMP build options
    if enable_openmp:
        if ctype in copt:
            for extension in self.extensions:
                extension.extra_compile_args = copt[ctype]
        if ctype in lopt:
            for extension in self.extensions:
                extension.extra_link_args = lopt[ctype]
    # Platform-specific build options; note these replace (not extend)
    # whatever the OpenMP branch assigned above.
    if ctype in platform_lopt:
        for extension in self.extensions:
            extension.extra_link_args = platform_lopt[ctype]
    if ctype in platform_copt:
        for extension in self.extensions:
            extension.extra_compile_args = platform_copt[ctype]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Probe for libc++ on macOS, add coverage/version flags, then build."""
    if sys.platform == 'darwin':
        darwin_flags = ['-stdlib=libc++', '-mmacosx-version-min=10.7']
        stdlib_flag = darwin_flags[0]
        # Prefer the stdlib flag alone; fall back to both together.
        if has_flag(self.compiler, [stdlib_flag]):
            self.c_opts['unix'] += [stdlib_flag]
        elif has_flag(self.compiler, darwin_flags):
            self.c_opts['unix'] += darwin_flags
        else:
            raise RuntimeError(
                'libc++ is needed! Failed to compile with {} and {}.'.format(
                    " ".join(darwin_flags), stdlib_flag))
    ct = self.compiler.compiler_type
    opts = self.c_opts.get(ct, [])
    extra_link_args = []
    if coverage:
        # Instrument both compile and link steps for coverage reporting.
        coverage_option = '--coverage'
        opts.append(coverage_option)
        extra_link_args.append(coverage_option)
    if ct == 'unix':
        opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
        opts.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, ['-fvisibility=hidden']):
            opts.append('-fvisibility=hidden')
    elif ct == 'msvc':
        opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
    for extension in self.extensions:
        extension.extra_compile_args = opts
        extension.extra_link_args = extra_link_args
    build_ext.build_extensions(self)
def build_extensions(self):
    """Add version info and, when the compiler supports it, OpenMP flags."""
    compiler_type = self.compiler.compiler_type
    compile_opts = self.c_opts.get(compiler_type, [])
    link_opts = []
    if compiler_type == 'unix':
        compile_opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
        compile_opts.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, '-fvisibility=hidden'):
            compile_opts.append('-fvisibility=hidden')
    elif compiler_type == 'msvc':
        compile_opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
    # Prefer -fopenmp (GCC/Clang spelling) and fall back to -openmp.
    if has_flag(self.compiler, '-fopenmp'):
        omp_flag = '-fopenmp'
    elif has_flag(self.compiler, '-openmp'):
        omp_flag = '-openmp'
    else:
        omp_flag = None
    if omp_flag is not None:
        compile_opts.append(omp_flag)
        link_opts.append(omp_flag)
    for extension in self.extensions:
        extension.extra_compile_args = compile_opts
        extension.extra_link_args = link_opts
    build_ext.build_extensions(self)
def build_extensions(self):
    """Drop GCC's -Wstrict-prototypes (meaningless for C++ sources), then build."""
    so_flags = self.compiler.compiler_so
    if '-Wstrict-prototypes' in so_flags:
        so_flags.remove('-Wstrict-prototypes')
    build_ext.build_extensions(self)
def build_extensions(self):
    """Route compilation and linking through nvcc so .cu sources build."""
    compiler = self.compiler
    compiler.src_extensions.append(".cu")
    compiler.set_executable("compiler_so", "nvcc")
    compiler.set_executable("linker_so", "nvcc --shared")
    # MSVC-style compilers keep a separate list of C extensions (Windows).
    if hasattr(compiler, "_c_extensions"):
        compiler._c_extensions.append(".cu")
    compiler.spawn = self.spawn
    build_ext.build_extensions(self)
def build_extensions(self):
    """Apply the compiler-specific argument set to every extension."""
    ctype = self.compiler.compiler_type
    print('COMPILER', ctype)
    flags = BUILD_ARGS[ctype]
    for extension in self.extensions:
        extension.extra_compile_args = flags
        print('COMPILER ARGUMENTS', extension.extra_compile_args)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """On Windows/MSVC, force ANSI C mode (/Za), then run the normal build."""
    if sys.platform != "win32":
        build_ext.build_extensions(self)
        return
    from distutils.msvccompiler import MSVCCompiler
    if isinstance(self.compiler, MSVCCompiler):
        # /Za disables Microsoft language extensions not compatible with ANSI C
        for extension in self.extensions:
            extension.extra_compile_args.append("/Za")
    build_ext.build_extensions(self)
def build_extensions(self):
    """Swap in nvcc as compiler and linker and register the .cu suffix."""
    self.compiler.src_extensions.append('.cu')
    for role, command in (('compiler_so', 'nvcc'),
                          ('linker_so', 'nvcc --shared')):
        self.compiler.set_executable(role, command)
    if hasattr(self.compiler, '_c_extensions'):
        # needed for Windows
        self.compiler._c_extensions.append('.cu')
    self.compiler.spawn = self.spawn
    build_ext.build_extensions(self)
def build_extensions(self):
    """Link libm on Unix-like platforms, then run the standard build.

    Fix: the original used substring membership (``compiler_type in "unix"``),
    which also matches strings like "u" or "ni"; equality is what was meant.
    """
    compiler_type = self.compiler.compiler_type
    if compiler_type == "unix":
        for ext in self.extensions:
            # on some Unix-like systems, such as Linux, the libc math
            # library is not linked by default:
            # https://github.com/cython/cython/issues/1585
            ext.extra_link_args.append("-lm")
    build_ext.build_extensions(self)
def build_extensions(self):
    """Add pybind11 include paths plus per-compiler options, then build."""
    compiler_type = self.compiler.compiler_type
    options = self.c_opts.get(compiler_type, [])
    # Deferred import: pybind11 may only be installed by the time we build.
    import pybind11
    for extension in self.extensions:
        extension.extra_compile_args = options
        for include_dir in (pybind11.get_include(),
                            pybind11.get_include(user=True)):
            extension.include_dirs.append(include_dir)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Ensure each extension can see numpy's C headers, then build."""
    import pkg_resources
    numpy_include = pkg_resources.resource_filename('numpy', 'core/include')
    for extension in self.extensions:
        if (hasattr(extension, 'include_dirs')
                and numpy_include not in extension.include_dirs):
            extension.include_dirs.append(numpy_include)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Extend extension attributes with options keyed by compiler class."""
    active_compiler = self.compiler
    # Collect the option maps whose key class matches the active compiler.
    matching = [options for cls, options in compiler_opts.items()
                if isinstance(active_compiler, cls)]
    for extension in self.extensions:
        for option_map in matching:
            for attr_name, extra_values in option_map.items():
                getattr(extension, attr_name).extend(extra_values)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Install compiler-specific compile and link flags before building."""
    ctype = self.compiler.compiler_type
    try:
        compile_flags = extra_compile_args[ctype]
    except KeyError:
        pass
    else:
        for extension in self.extensions:
            extension.extra_compile_args = compile_flags
    try:
        link_flags = extra_link_args[ctype]
    except KeyError:
        pass
    else:
        for extension in self.extensions:
            extension.extra_link_args = link_flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Apply custom per-compiler build options, then build."""
    ctype = self.compiler.compiler_type
    # Membership on the dict itself is equivalent to the list(keys()) check.
    if ctype in copt:
        for extension in self.extensions:
            extension.extra_compile_args = copt[ctype]
    if ctype in lopt:
        for extension in self.extensions:
            extension.extra_link_args = lopt[ctype]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Report the detected compiler and apply its custom flags."""
    ctype = self.compiler.compiler_type
    print('Compiler:', ctype)
    for extension in self.extensions:
        if ctype in copt:
            extension.extra_compile_args = copt[ctype]
        if ctype in lopt:
            extension.extra_link_args = lopt[ctype]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Append warning-suppression flags appropriate for this compiler."""
    if self.compiler.compiler_type == 'msvc':
        extra_flags = ('/D_CRT_SECURE_NO_DEPRECATE', '/EHsc', '/wd4355',
                       '/wd4800')
    else:
        extra_flags = ('-Wno-switch',)
    for extension in self.extensions:
        extension.extra_compile_args += extra_flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Verify C++ ABI compatibility, then compile everything as C++11."""
    # compiler_cxx is missing on some platforms, like Windows; fall back to
    # the CXX environment variable (default 'c++').
    cxx_cmd = getattr(self.compiler, 'compiler_cxx', None)
    if cxx_cmd is not None:
        compiler = cxx_cmd[0]
    else:
        compiler = os.environ.get('CXX', 'c++')
    check_compiler_abi_compatibility(compiler)
    for extension in self.extensions:
        extension.extra_compile_args = ['-std=c++11']
    build_ext.build_extensions(self)
def build_extensions(self):
    """Add C++ standard and symbol-visibility flags on Unix compilers."""
    compiler_type = self.compiler.compiler_type
    flags = self.c_opts.get(compiler_type, [])
    if compiler_type == 'unix':
        flags.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, '-fvisibility=hidden'):
            flags.append('-fvisibility=hidden')
    for extension in self.extensions:
        extension.extra_compile_args = flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Enable the aggressive optimization flags this compiler supports."""
    accepted_flags = ['-g0']
    accepted_flags.extend(
        flag for flag in ('-w', '-Ofast', '-ffast-math', '-march=native')
        if has_flag(self.compiler, flag))
    for extension in self.extensions:
        extension.extra_compile_args += accepted_flags
        extension.include_dirs.extend([get_include()])
    build_ext.build_extensions(self)
def build_extensions(self):
    """Translate generic flags/libraries into compiler-specific spellings."""
    ctype = self.compiler.compiler_type
    if ctype not in translator:
        table = translator["default"]
    else:
        table = translator[ctype]
    for extension in self.extensions:
        # Entry layout: table[flag] = (compile spelling, link spelling);
        # unknown flags pass through unchanged.
        extension.extra_compile_args = [
            table[flag][0] if flag in table else flag
            for flag in extension.extra_compile_args
        ]
        extension.extra_link_args = [
            table[flag][1] if flag in table else flag
            for flag in extension.extra_link_args
        ]
        # Libraries without a translation are dropped entirely.
        extension.libraries = [
            table[lib] for lib in extension.libraries if lib in table
        ]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Strip C-only warning flags on Unix and set per-compiler C++ flags."""
    ctype = self.compiler.compiler_type
    if ctype == 'unix':
        # -Wstrict-prototypes is invalid for C++ translation units.
        so_args = self.compiler.compiler_so
        if '-Wstrict-prototypes' in so_args:
            so_args.remove('-Wstrict-prototypes')
    if ctype in cflags:
        flags = cflags[ctype]
        for extension in self.extensions:
            extension.extra_compile_args = flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Detect the real compiler behind the CXX command and apply its options."""
    cxx_cmd = self.compiler.executables['compiler_cxx'][0]
    compiler_kind = get_compiler(cxx_cmd)
    if cxx_cmd == compiler_kind:
        print('Using compiler %s'%(cxx_cmd))
    else:
        print('Using compiler %s, which is %s'%(cxx_cmd,compiler_kind))
    for extension in self.extensions:
        extension.extra_compile_args = copt[compiler_kind]
        extension.extra_link_args = lopt[compiler_kind]
        extension.include_dirs = ['include']
    build_ext.build_extensions(self)
def build_extensions(self):
    """Apply per-compiler flags and library lists, then build."""
    ctype = self.compiler.compiler_type
    print("Compiling with "+ctype)
    # Each table maps compiler type -> value for the named extension attribute.
    for table, attr in ((copt, 'extra_compile_args'),
                        (lopt, 'extra_link_args'),
                        (libs, 'libraries')):
        if ctype in table:
            for extension in self.extensions:
                setattr(extension, attr, table[ctype])
    build_ext.build_extensions(self)
def build_extensions(self):
    """Map generic compile/link flags to this compiler's dialect."""
    key = self.compiler.compiler_type
    if key in self.translator:
        table = self.translator[key]
    else:
        table = self.translator['default']

    def to_compile(flag):
        # Index 0 of a translation entry is the compile-time spelling.
        return table[flag][0] if flag in table else flag

    def to_link(flag):
        # Index 1 is the link-time spelling.
        return table[flag][1] if flag in table else flag

    for extension in self.extensions:
        extension.extra_compile_args = [to_compile(f)
                                        for f in extension.extra_compile_args]
        extension.extra_link_args = [to_link(f)
                                     for f in extension.extra_link_args]
        # Libraries with no translation entry are dropped.
        extension.libraries = [table[lib] for lib in extension.libraries
                               if lib in table]
    build_ext.build_extensions(self)
def build_extensions(self):
    """Give each extension independent flag lists; add C++ flags per language."""
    compiler_type = self.compiler.compiler_type
    base_opts = self.c_opts.get(compiler_type, [])
    if compiler_type == 'unix':
        base_opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
    for extension in self.extensions:
        # Deep copies keep the per-language additions below from leaking
        # between extensions through a shared list object.
        extension.extra_compile_args = deepcopy(base_opts)
        extension.extra_link_args = deepcopy(base_opts)
        language = extension.language or self.compiler.detect_language(
            extension.sources)
        if language == 'c++':
            extension.extra_compile_args.append(cpp_flag(self.compiler))
            extension.extra_link_args.append(cpp_flag(self.compiler))
    build_ext.build_extensions(self)
def build_extensions(self):
    """Attach the per-compiler base options to every extension, then build."""
    flags = self.c_opts.get(self.compiler.compiler_type, [])
    for extension in self.extensions:
        extension.extra_compile_args = flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Wire up Eigen/numpy/pybind11 headers and per-platform C++ flags, then build."""
    # The include directory for the celerite headers
    localincl = "vendor"
    if not os.path.exists(
            os.path.join(localincl, "eigen_3.3.4", "Eigen", "Core")):
        raise RuntimeError("couldn't find Eigen headers")

    # Add the pybind11 include directory
    # (deferred imports: both may have been installed by this setup script)
    import numpy
    import pybind11
    include_dirs = [
        os.path.join("george", "include"),
        os.path.join(localincl, "eigen_3.3.4"),
        numpy.get_include(),
        pybind11.get_include(False),
        pybind11.get_include(True),
    ]
    # Prepend so the vendored headers win over anything already configured.
    for ext in self.extensions:
        ext.include_dirs = include_dirs + ext.include_dirs

    # Compiler flags
    ct = self.compiler.compiler_type
    opts = self.c_opts.get(ct, [])
    if ct == "unix":
        opts.append("-DVERSION_INFO=\"{0:s}\"".format(
            self.distribution.get_version()))
        print("testing C++14/C++11 support")
        opts.append(cpp_flag(self.compiler))

        flags = [
            "-stdlib=libc++", "-funroll-loops", "-Wno-unused-function",
            "-Wno-uninitialized", "-Wno-unused-local-typedefs"
        ]

        # Mac specific flags and libraries
        if sys.platform == "darwin":
            flags += ["-march=native", "-mmacosx-version-min=10.9"]
            for lib in ["m", "c++"]:
                for ext in self.extensions:
                    ext.libraries.append(lib)
            for ext in self.extensions:
                ext.extra_link_args += [
                    "-mmacosx-version-min=10.9", "-march=native"
                ]
        else:
            # Non-mac Unix: only link libraries that actually exist here.
            libraries = ["m", "stdc++", "c++"]
            for lib in libraries:
                if not has_library(self.compiler, lib):
                    continue
                for ext in self.extensions:
                    ext.libraries.append(lib)

        # Check the flags: keep only those the compiler accepts.
        print("testing compiler flags")
        for flag in flags:
            if has_flag(self.compiler, flag):
                opts.append(flag)

    elif ct == "msvc":
        opts.append("/DVERSION_INFO=\\\"{0:s}\\\"".format(
            self.distribution.get_version()))

    for ext in self.extensions:
        ext.extra_compile_args = opts

    # Run the standard build procedure.
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Old TensorFlow toolchains need -Wstrict-prototypes stripped first."""
    installed_tf = LooseVersion(tf.__version__)
    if installed_tf < LooseVersion('1.4'):
        self.compiler.compiler_so.remove('-Wstrict-prototypes')
    orig_build_ext.build_extensions(self)
def build_extensions(self):
    """Build Paddle custom-op extensions, routing CUDA sources via nvcc/hipcc."""
    if OS_NAME.startswith("darwin"):
        self._valid_clang_compiler()

    self._check_abi()

    # Note(Aurelius84): If already compiling source before, we should check whether
    # cflags have changed and delete the built shared library to re-compile the source
    # even though source file content keep unchanged.
    so_name = self.get_ext_fullpath(self.extensions[0].name)
    clean_object_if_change_cflags(
        os.path.abspath(so_name), self.extensions[0])

    # Consider .cu, .cu.cc as valid source extensions.
    self.compiler.src_extensions += ['.cu', '.cu.cc']
    # Save the original _compile method for later.
    if self.compiler.compiler_type == 'msvc':
        self.compiler._cpp_extensions += ['.cu', '.cuh']
        original_compile = self.compiler.compile
        original_spawn = self.compiler.spawn
    else:
        original_compile = self.compiler._compile

    def unix_custom_single_compiler(obj, src, ext, cc_args, extra_postargs,
                                    pp_opts):
        """
        Monkey patch machanism to replace inner compiler to custom complie process on Unix platform.
        """
        # use abspath to ensure no warning and don't remove deecopy because modify params
        # with dict type is dangerous.
        src = os.path.abspath(src)
        cflags = copy.deepcopy(extra_postargs)
        try:
            original_compiler = self.compiler.compiler_so
            # nvcc or hipcc compile CUDA source
            if is_cuda_file(src):
                if core.is_compiled_with_rocm():
                    assert ROCM_HOME is not None, "Not found ROCM runtime, \
                        please use `export ROCM_PATH= XXX` to specify it."

                    hipcc_cmd = os.path.join(ROCM_HOME, 'bin', 'hipcc')
                    self.compiler.set_executable('compiler_so', hipcc_cmd)
                    # {'nvcc': {}, 'cxx: {}}
                    if isinstance(cflags, dict):
                        cflags = cflags['hipcc']
                else:
                    assert CUDA_HOME is not None, "Not found CUDA runtime, \
                        please use `export CUDA_HOME= XXX` to specify it."

                    nvcc_cmd = os.path.join(CUDA_HOME, 'bin', 'nvcc')
                    self.compiler.set_executable('compiler_so', nvcc_cmd)
                    # {'nvcc': {}, 'cxx: {}}
                    if isinstance(cflags, dict):
                        cflags = cflags['nvcc']

                    cflags = prepare_unix_cudaflags(cflags)
            # cxx compile Cpp source
            elif isinstance(cflags, dict):
                cflags = cflags['cxx']

            # Note(qili93): HIP require some additional flags for CMAKE_C_FLAGS
            if core.is_compiled_with_rocm():
                cflags.append('-D__HIP_PLATFORM_HCC__')
                cflags.append('-D__HIP_NO_HALF_CONVERSIONS__=1')
                cflags.append(
                    '-DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP')

            # NOTE(Aurelius84): Since Paddle 2.0, we require gcc version > 5.x,
            # so we add this flag to ensure the symbol names from user compiled
            # shared library have same ABI suffix with core_(no)avx.so.
            # See https://stackoverflow.com/questions/34571583/understanding-gcc-5s-glibcxx-use-cxx11-abi-or-the-new-abi
            add_compile_flag(cflags, ['-D_GLIBCXX_USE_CXX11_ABI=1'])
            # Append this macor only when jointly compiling .cc with .cu
            if not is_cuda_file(src) and self.contain_cuda_file:
                if core.is_compiled_with_rocm():
                    cflags.append('-DPADDLE_WITH_HIP')
                else:
                    cflags.append('-DPADDLE_WITH_CUDA')

            add_std_without_repeat(
                cflags, self.compiler.compiler_type, use_std14=True)
            original_compile(obj, src, ext, cc_args, cflags, pp_opts)
        finally:
            # restore original_compiler
            self.compiler.set_executable('compiler_so', original_compiler)

    def win_custom_single_compiler(sources,
                                   output_dir=None,
                                   macros=None,
                                   include_dirs=None,
                                   debug=0,
                                   extra_preargs=None,
                                   extra_postargs=None,
                                   depends=None):

        self.cflags = copy.deepcopy(extra_postargs)
        extra_postargs = None

        def win_custom_spawn(cmd):
            # Using regex to modify compile options
            compile_options = self.compiler.compile_options
            for i in range(len(cmd)):
                if re.search('/MD', cmd[i]) is not None:
                    cmd[i] = '/MT'
                if re.search('/W[1-4]', cmd[i]) is not None:
                    cmd[i] = '/W0'

            # Using regex to match src, obj and include files
            src_regex = re.compile('/T(p|c)(.*)')
            src_list = [
                m.group(2) for m in (src_regex.match(elem) for elem in cmd)
                if m
            ]

            obj_regex = re.compile('/Fo(.*)')
            obj_list = [
                m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
                if m
            ]

            include_regex = re.compile(r'((\-|\/)I.*)')
            include_list = [
                m.group(1)
                for m in (include_regex.match(elem) for elem in cmd) if m
            ]

            assert len(src_list) == 1 and len(obj_list) == 1
            src = src_list[0]
            obj = obj_list[0]
            if is_cuda_file(src):
                assert CUDA_HOME is not None, "Not found CUDA runtime, \
                    please use `export CUDA_HOME= XXX` to specify it."

                nvcc_cmd = os.path.join(CUDA_HOME, 'bin', 'nvcc')
                if isinstance(self.cflags, dict):
                    cflags = self.cflags['nvcc']
                elif isinstance(self.cflags, list):
                    cflags = self.cflags
                else:
                    cflags = []

                cflags = prepare_win_cudaflags(cflags) + ['--use-local-env']
                for flag in MSVC_COMPILE_FLAGS:
                    cflags = ['-Xcompiler', flag] + cflags
                cmd = [nvcc_cmd, '-c', src, '-o', obj
                       ] + include_list + cflags
            elif isinstance(self.cflags, dict):
                cflags = MSVC_COMPILE_FLAGS + self.cflags['cxx']
                cmd += cflags
            elif isinstance(self.cflags, list):
                cflags = MSVC_COMPILE_FLAGS + self.cflags
                cmd += cflags
            # Append this macor only when jointly compiling .cc with .cu
            if not is_cuda_file(src) and self.contain_cuda_file:
                cmd.append('-DPADDLE_WITH_CUDA')

            return original_spawn(cmd)

        try:
            self.compiler.spawn = win_custom_spawn
            return original_compile(sources, output_dir, macros,
                                    include_dirs, debug, extra_preargs,
                                    extra_postargs, depends)
        finally:
            self.compiler.spawn = original_spawn

    def object_filenames_with_cuda(origina_func, build_directory):
        """
        Decorated the function to add customized naming machanism.
        Originally, both .cc/.cu will have .o object output that will
        bring file override problem. Use .cu.o as CUDA object suffix.
        """

        def wrapper(source_filenames, strip_dir=0, output_dir=''):
            try:
                objects = origina_func(source_filenames, strip_dir,
                                       output_dir)
                for i, source in enumerate(source_filenames):
                    # modify xx.o -> xx.cu.o/xx.cu.obj
                    if is_cuda_file(source):
                        old_obj = objects[i]
                        if self.compiler.compiler_type == 'msvc':
                            objects[i] = old_obj[:-3] + 'cu.obj'
                        else:
                            objects[i] = old_obj[:-1] + 'cu.o'
                # if user set build_directory, output objects there.
                if build_directory is not None:
                    objects = [
                        os.path.join(build_directory, os.path.basename(obj))
                        for obj in objects
                    ]
                # ensure to use abspath
                objects = [os.path.abspath(obj) for obj in objects]
            finally:
                self.compiler.object_filenames = origina_func

            return objects

        return wrapper

    # customized compile process
    if self.compiler.compiler_type == 'msvc':
        self.compiler.compile = win_custom_single_compiler
    else:
        self.compiler._compile = unix_custom_single_compiler

    self.compiler.object_filenames = object_filenames_with_cuda(
        self.compiler.object_filenames, self.build_lib)
    self._record_op_info()

    print("Compiling user custom op, it will cost a few seconds.....")
    build_ext.build_extensions(self)

    # Reset runtime library path on MacOS platform
    so_path = self.get_ext_fullpath(self.extensions[0]._full_name)
    _reset_so_rpath(so_path)
def build_extensions(self):
    # Thin wrapper: delegate straight to the stock implementation.
    _build_ext.build_extensions(self)
def build_extensions(self):
    # Configure shared_ptr-related compiler flags before the normal build.
    self.set_shared_ptr_flags()
    build_ext.build_extensions(self)
def build_extensions(self):
    # Finish populating the extension definition (deferred until build time),
    # then run the standard build.
    fully_define_extension(self)
    build_ext.build_extensions(self)
def build_extensions(self): """Set up the build extensions.""" # TODO: move build customization here? build_ext.build_extensions(self)
def build_extensions(self):
    """Run XPA's ./configure once to generate its config header, then build."""
    import subprocess
    configure_needed = not os.path.exists(CONF_H_NAME)
    if configure_needed:
        subprocess.check_call(["sh", "./configure"], cwd=XPALIB_DIR)
    build_ext.build_extensions(self)
def build_extensions(self):
    # Monkey-patch the compiler so CUDA sources are dispatched to nvcc.
    customize_compiler_for_nvcc(self.compiler)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Force the module-level compile flags onto every extension."""
    for extension in self.extensions:
        # NOTE: all extensions intentionally share the same flag list object.
        extension.extra_compile_args = extra_compile_args
    build_ext.build_extensions(self)
def build_extensions(self):
    """Locate (and if needed upgrade) libFuzzer, build, then deploy merged
    sanitizer + fuzzer runtime libraries alongside the extension."""
    libfuzzer = get_libfuzzer_lib()
    orig_libfuzzer = libfuzzer
    orig_libfuzzer_name = os.path.basename(libfuzzer)
    version = check_libfuzzer_version(libfuzzer)

    # On macOS the in-place upgrade path is not supported at all.
    if sys.platform == "darwin" and version != "up-to-date":
        raise RuntimeError(too_old_error)

    if version == "outdated-unrecoverable":
        raise RuntimeError(too_old_error)
    elif version == "outdated-recoverable":
        sys.stderr.write(
            "Your libFuzzer version is too old, but it's possible "
            "to attempt an in-place upgrade. Trying that now.\n")
        libfuzzer = upgrade_libfuzzer(libfuzzer)
        if check_libfuzzer_version(libfuzzer) != "up-to-date":
            sys.stderr.write("Upgrade failed.")
            raise RuntimeError(too_old_error)
    elif version != "up-to-date":
        raise RuntimeError("Unexpected up-to-date status: " + version)
    sys.stderr.write("Your libFuzzer is up-to-date.\n")

    c_opts = ["-Wno-deprecated-declarations", "-Wno-attributes"]
    l_opts = []

    if sys.platform == "darwin":
        darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
        c_opts += darwin_opts
        l_opts += darwin_opts

    ct = self.compiler.compiler_type
    if ct == "unix":
        c_opts.append(cpp_flag(self.compiler))

    for ext in self.extensions:
        ext.define_macros = [
            ("VERSION_INFO",
             "'{}'".format(self.distribution.get_version())),
            # e.g. "atheris.core" -> "core"
            ("ATHERIS_MODULE_NAME", ext.name.split(".")[1])
        ]
        ext.extra_compile_args = c_opts
        # Only the core_with_libfuzzer module links libFuzzer directly.
        if ext.name == "atheris.core_with_libfuzzer":
            ext.extra_link_args = l_opts + [libfuzzer]
        else:
            ext.extra_link_args = l_opts

    build_ext.build_extensions(self)

    # Best-effort copy of the (possibly upgraded) libFuzzer archive.
    try:
        self.deploy_file(libfuzzer, orig_libfuzzer_name)
    except Exception as e:
        sys.stderr.write(str(e))
        sys.stderr.write("\n")

    # Deploy versions of ASan and UBSan that have been merged with libFuzzer
    asan_name = orig_libfuzzer.replace(".fuzzer_no_main-", ".asan-")
    merged_asan_name = "asan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, asan_name, merged_asan_name,
        "asan_preinit.cc.o asan_preinit.cpp.o")

    ubsan_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                        ".ubsan_standalone-")
    merged_ubsan_name = "ubsan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsan_name, merged_ubsan_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )

    ubsanxx_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                          ".ubsan_standalone_cxx-")
    merged_ubsanxx_name = "ubsan_cxx_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsanxx_name, merged_ubsanxx_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )
def build_extensions(self):
    """Apply compiler-specific optimization flags from COPT, then build."""
    ctype = self.compiler.compiler_type
    if ctype in COPT:
        flags = COPT[ctype]
        for extension in self.extensions:
            extension.extra_compile_args = flags
    build_ext.build_extensions(self)
def build_extensions(self):
    """Patch the compiler, then each extension, before the standard build."""
    self.patch_compiler()
    for extension in self.extensions:
        self.patch_extension(extension)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Compile the python extension module for further installation."""
    global final_supported_archs
    ext_extra_objects = []
    ext_libs = []
    ext_libs_dir = []
    ext_includes = []

    self.platform = CustomBuildExtension.PLATFORMS.get(sys.platform, None)
    if self.platform == None:
        raise Exception("unsupported platform: %s" % sys.platform)

    if self.with_static_binutils:
        # the user has specified a custom binutils distro.
        print("[+] Using specific binutils static distribution")
        print("[+] %s" % self.with_static_binutils)
        self.platform["libs"] = [
            os.path.join(self.with_static_binutils, "lib"),
        ]
        self.platform["includes"] = [
            os.path.join(self.with_static_binutils, "include"),
        ]
        self.platform["possible-lib-ext"] = [
            ".a",
        ]  # for all unix platforms.

    # check for known includes
    for inc in self.platform["includes"]:
        if self.check_includes(inc):
            # found a valid include dir with bintuils
            self.includes = inc
            break

    if self.includes == None:
        raise Exception(
            "unable to determine correct include path for bfd.h / dis-asm.h")

    print("[+] Using binutils headers at:")
    print("[+] %s" % self.includes)

    # we'll use this include path for building.
    ext_includes = [self.includes, ]

    # Try to guess libopcodes / libbfd libs.
    libs_dirs = self.platform["libs"]
    print("[+] Searching binutils libraries...")
    for libdir in libs_dirs:
        for possible_lib_ext in self.platform["possible-lib-ext"]:
            libs = self.find_binutils_libs(libdir, possible_lib_ext)
            if libs:
                if self.libs:
                    self.libs = self.libs + libs
                else:
                    self.libs = libs
                break

    if self.libs == None:
        raise Exception("unable to find binutils libraries.")
    for lib in self.libs:
        print("[+] %s" % lib)

    #
    # check for libopcodes / libbfd
    #
    libnames = [os.path.basename(lib) for lib in self.libs]
    libraries_paths = [os.path.dirname(lib) for lib in self.libs]
    libraries_paths = list(set(libraries_paths))  # removing duplicates
    if not all(
            [lib.startswith("libopcodes") or lib.startswith("libbfd")
             for lib in libnames]):
        raise Exception("missing expected library (libopcodes / libbfd) in %s."
                        % "\n".join(libraries_paths))
    ext_libs_dir += libraries_paths

    if self.with_static_binutils:
        # use libs as extra objects...
        ext_extra_objects.extend(self.libs)
    else:
        ext_libs = [self.prepare_libs_for_cc(os.path.basename(lib))
                    for lib in self.libs]

    # add dependecy to libiberty
    if self.with_static_binutils or sys.platform == "darwin":
        # in OSX we always needs a static lib-iverty.
        lib_liberty_partialpath = [lib_path for lib_path in libraries_paths]
        # NOTE: At least on Catalina, this is no longer the case
        # if sys.platform == "darwin":
        #     # in osx the lib-iberty is prefixe by "machine" ppc/i386/x86_64
        #     lib_liberty_partialpath.append( self._darwin_current_arch() )
        lib_liberty_partialpath.append("libiberty.a")
        # merge the prefix and the path
        lib_liberty_fullpath = os.path.join(*lib_liberty_partialpath)
        if not os.path.isfile(lib_liberty_fullpath):
            raise Exception("missing expected library (libiberty) in %s."
                            % "\n".join(libraries_paths))
        ext_extra_objects.append(lib_liberty_fullpath)

    # add dependecy to zlib and dl
    if self.with_static_binutils:
        lib_zlib_partialpath = [lib_path for lib_path in libraries_paths]
        lib_zlib_partialpath.append("libz.so")
        # merge the prefix and the path
        lib_zlib_fullpath = os.path.join(*lib_zlib_partialpath)
        if not os.path.isfile(lib_zlib_fullpath):
            raise Exception("missing expected library (libz) in %s."
                            % "\n".join(libraries_paths))
        ext_extra_objects.append(lib_zlib_fullpath)

        lib_dl_partialpath = [lib_path for lib_path in libraries_paths]
        lib_dl_partialpath.append("libdl.so")
        # merge the prefix and the path
        lib_dl_fullpath = os.path.join(*lib_dl_partialpath)
        if not os.path.isfile(lib_dl_fullpath):
            raise Exception("missing expected library (libdl) in %s."
                            % "\n".join(libraries_paths))
        ext_extra_objects.append(lib_dl_fullpath)

    # generate .py / .h files that depends of libopcodes / libbfd currently selected
    final_supported_archs, macros = self.generate_source_files()

    # final hacks for OSX
    if sys.platform == "darwin":
        # fix arch value.
        os.environ["ARCHFLAGS"] = "-arch %s" % self._darwin_current_arch()
        # In OSX we've to link against libintl.
        ext_libs.append("intl")
        # In OSX we also must link against libz
        ext_libs.append("z")
        # TODO: we have to improve the detection of gettext/libintl in OSX.. this is a quick fix.
        dirs = [
            "/usr/local/opt/gettext/lib",  # homebrew
            "/opt/local/lib"  # macports
        ]
        for d in dirs:
            if os.path.exists(d):
                ext_libs_dir.append(d)

    # fix extensions.
    for extension in self.extensions:
        extension.include_dirs.extend(ext_includes)
        extension.extra_objects.extend(ext_extra_objects)
        extension.libraries.extend(ext_libs)
        extension.library_dirs.extend(ext_libs_dir)
        extension.define_macros.extend(macros)

    return build_ext.build_extensions(self)
def build_extensions(self):
    # Apply project-wide build options (helper defined elsewhere), then
    # defer to the standard build.
    build_ext_options.build_options(self)
    _build_ext.build_extensions(self)
def build_extensions(self):
    # Validate build prerequisites before invoking the standard build.
    self.check_requirements()
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build torch C++/CUDA extensions, dispatching .cu files to nvcc."""
    self._check_abi()
    for extension in self.extensions:
        self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H')
        self._define_torch_extension_name(extension)
        self._add_gnu_abi_flag_if_binary(extension)

    # Register .cu and .cuh as valid source extensions.
    self.compiler.src_extensions += ['.cu', '.cuh']
    # Save the original _compile method for later.
    if self.compiler.compiler_type == 'msvc':
        self.compiler._cpp_extensions += ['.cu', '.cuh']
        original_compile = self.compiler.compile
        original_spawn = self.compiler.spawn
    else:
        original_compile = self.compiler._compile

    def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Copy before we make any modifications.
        cflags = copy.deepcopy(extra_postargs)
        try:
            original_compiler = self.compiler.compiler_so
            if _is_cuda_file(src):
                # CUDA source: temporarily swap the compiler for nvcc.
                nvcc = _join_cuda_home('bin', 'nvcc')
                self.compiler.set_executable('compiler_so', nvcc)
                if isinstance(cflags, dict):
                    cflags = cflags['nvcc']
                cflags = COMMON_NVCC_FLAGS + ['--compiler-options',
                                              "'-fPIC'"] + cflags
            elif isinstance(cflags, dict):
                cflags = cflags['cxx']
            # NVCC does not allow multiple -std to be passed, so we avoid
            # overriding the option if the user explicitly passed it.
            if not any(flag.startswith('-std=') for flag in cflags):
                cflags.append('-std=c++11')
            original_compile(obj, src, ext, cc_args, cflags, pp_opts)
        finally:
            # Put the original compiler back in place.
            self.compiler.set_executable('compiler_so', original_compiler)

    def win_wrap_compile(sources,
                         output_dir=None,
                         macros=None,
                         include_dirs=None,
                         debug=0,
                         extra_preargs=None,
                         extra_postargs=None,
                         depends=None):

        self.cflags = copy.deepcopy(extra_postargs)
        extra_postargs = None

        def spawn(cmd):
            orig_cmd = cmd
            # Using regex to match src, obj and include files
            src_regex = re.compile('/T(p|c)(.*)')
            src_list = [
                m.group(2) for m in (src_regex.match(elem) for elem in cmd)
                if m
            ]

            obj_regex = re.compile('/Fo(.*)')
            obj_list = [
                m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
                if m
            ]

            include_regex = re.compile(r'((\-|\/)I.*)')
            include_list = [
                m.group(1)
                for m in (include_regex.match(elem) for elem in cmd) if m
            ]

            if len(src_list) >= 1 and len(obj_list) >= 1:
                src = src_list[0]
                obj = obj_list[0]
                if _is_cuda_file(src):
                    # CUDA source: rebuild the command around nvcc.
                    nvcc = _join_cuda_home('bin', 'nvcc')
                    if isinstance(self.cflags, dict):
                        cflags = self.cflags['nvcc']
                    elif isinstance(self.cflags, list):
                        cflags = self.cflags
                    else:
                        cflags = []
                    cmd = [
                        nvcc, '-c', src, '-o', obj, '-Xcompiler', '/wd4819',
                        '-Xcompiler', '/MD'
                    ] + include_list + cflags
                elif isinstance(self.cflags, dict):
                    cflags = self.cflags['cxx']
                    cmd += cflags
                elif isinstance(self.cflags, list):
                    cflags = self.cflags
                    cmd += cflags

            return original_spawn(cmd)

        try:
            self.compiler.spawn = spawn
            return original_compile(sources, output_dir, macros,
                                    include_dirs, debug, extra_preargs,
                                    extra_postargs, depends)
        finally:
            self.compiler.spawn = original_spawn

    # Monkey-patch the _compile method.
    if self.compiler.compiler_type == 'msvc':
        self.compiler.compile = win_wrap_compile
    else:
        self.compiler._compile = unix_wrap_compile

    build_ext.build_extensions(self)
def build_extensions(self):
    """Inject numpy's C header directory into every extension, then build.

    The directory is appended at most once per extension; extensions
    without an ``include_dirs`` attribute are skipped.
    """
    numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
    for ext in self.extensions:
        # Idiomatic membership test (`x not in y`), same semantics as the
        # original `not x in y`.
        if hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs:
            ext.include_dirs.append(numpy_incl)
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Build extensions after best-effort removal of '-Wstrict-prototypes'.

    distutils passes that C-only warning flag even when compiling C++;
    the flag (or the ``compiler_so`` attribute itself, e.g. with MSVC)
    may be absent, so failures are silently ignored.
    """
    try:
        so_flags = self.compiler.compiler_so
        so_flags.remove("-Wstrict-prototypes")
    except (AttributeError, ValueError):
        # Nothing to strip -- flag or attribute not present.
        pass
    _build_ext.build_extensions(self)
def build_extensions(self):
    """Request C99 mode on every non-MSVC compiler, then run the build."""
    is_msvc = self.compiler.compiler_type == "msvc"
    if not is_msvc:
        # MSVC has no -std switch; every other supported compiler does.
        for extension in self.extensions:
            extension.extra_compile_args.append("-std=c99")
    build_ext.build_extensions(self)
def build_extensions(self):
    """Publish prepared build options on singa_wrap, adapt the compiler
    for nvcc, and run the standard build."""
    options = prepare_extension_options()
    # Equivalent to assigning each key/value pair into the module dict.
    singa_wrap.__dict__.update(options)
    customize_compiler_for_nvcc(self.compiler)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build extensions with CUDA support.

    Strips the C-only '-Wstrict-prototypes' flag, registers ``.cu``/
    ``.cuh`` sources, monkey-patches the compiler so CUDA sources go
    through ``nvcc``, makes CUDA object names unique, and finally
    delegates to ``_build_ext.build_extensions``.
    """
    try:
        self.compiler.compiler_so.remove("-Wstrict-prototypes")
    except (AttributeError, ValueError):
        # Flag or compiler_so attribute absent -- nothing to strip.
        pass
    self.compiler.src_extensions += ['.cu', '.cuh']
    if self.compiler.compiler_type == 'msvc':
        # MSVC compiles source lists in bulk, so wrap `compile`/`spawn`
        # rather than the per-file `_compile` used on unix.
        self.compiler._cpp_extensions += ['.cu', '.cuh']
        original_compile = self.compiler.compile
        original_spawn = self.compiler.spawn
    else:
        original_compile = self.compiler._compile
    original_object_filenames = self.compiler.object_filenames

    def object_filenames(source_filenames, strip_dir, output_dir):
        """Patch to make the cuda objects unique."""
        # Keeps foo.cc and foo.cu from both producing foo.o by folding
        # the source extension into the object name (foo.cu.o).
        objects = original_object_filenames(source_filenames, strip_dir,
                                            output_dir)
        for i, src_name in enumerate(source_filenames):
            if _is_cuda_file(src_name):
                _, src_ext = _os.path.splitext(src_name)
                obj_base, obj_ext = _os.path.splitext(objects[i])
                objects[i] = obj_base + src_ext + obj_ext
        return objects

    def unix_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Patch to support cuda sources."""
        original_compiler = self.compiler.compiler_so
        try:
            cflags = _copy.deepcopy(extra_postargs)
            if _is_cuda_file(src):
                nvcc = [_join_cuda_path('bin', 'nvcc')]
                self.compiler.set_executable('compiler_so', nvcc)
                if isinstance(cflags, dict):
                    cflags = cflags['nvcc']
                # -fPIC must reach the host compiler via --compiler-options.
                cflags = (COMMON_NVCC_FLAGS +
                          ['--compiler-options', "'-fPIC'"] + cflags +
                          _get_cuda_arch_flags(cflags))
            else:
                if isinstance(cflags, dict):
                    cflags = cflags['cxx']
                cflags += COMMON_CC_FLAGS
            # Don't override an explicitly requested C++ standard.
            if not any(flag.startswith('-std=') for flag in cflags):
                cflags.append('-std=c++14')
            original_compile(obj, src, ext, cc_args, cflags, pp_opts)
        finally:
            # Restore the host compiler even on failure.
            self.compiler.set_executable('compiler_so', original_compiler)

    def win_compile(
        sources,
        output_dir=None,
        macros=None,
        include_dirs=None,
        debug=0,
        extra_preargs=None,
        extra_postargs=None,
        depends=None,
    ):
        # Re-derive the preprocessor options so the nvcc command line
        # can reuse them; stash cflags on self for the spawn() hook.
        compile_info = \
            self.compiler._setup_compile(
                output_dir, macros, include_dirs, sources, depends,
                extra_postargs,
            )
        _, _, _, pp_opts, _ = compile_info
        self.cflags = _copy.deepcopy(extra_postargs)
        extra_postargs = None

        def spawn(cmd):
            # Using regex to match src, obj and include files.
            src_regex = _re.compile('/T(p|c)(.*)')
            src_list = [
                m.group(2) for m in (src_regex.match(elem) for elem in cmd)
                if m
            ]
            obj_regex = _re.compile('/Fo(.*)')
            obj_list = [
                m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
                if m
            ]
            include_regex = _re.compile(r'((\-|\/)I.*)')
            include_list = [
                m.group(1)
                for m in (include_regex.match(elem) for elem in cmd) if m
            ]
            if len(src_list) >= 1 and len(obj_list) >= 1:
                src = src_list[0]
                obj = obj_list[0]
                if _is_cuda_file(src):
                    nvcc = _join_cuda_path('bin', 'nvcc')
                    if isinstance(self.cflags, dict):
                        cflags = self.cflags['nvcc']
                    elif isinstance(self.cflags, list):
                        cflags = self.cflags
                    else:
                        cflags = []
                    cflags = COMMON_NVCC_FLAGS + cflags + _get_cuda_arch_flags(
                        cflags)
                    # MSVC-only flags must be forwarded via -Xcompiler.
                    for flag in COMMON_MSVC_FLAGS:
                        cflags = ['-Xcompiler', flag] + cflags
                    cmd = [nvcc, '-c', src, '-o', obj
                           ] + pp_opts + include_list + cflags
                elif isinstance(self.cflags, dict):
                    cflags = COMMON_MSVC_FLAGS + self.cflags['cxx']
                    cmd += cflags
                elif isinstance(self.cflags, list):
                    cflags = COMMON_MSVC_FLAGS + self.cflags
                    cmd += cflags
            # NOTE(review): placement reconstructed from a flattened
            # source -- presumably applies to every spawned command;
            # confirm against upstream.
            if '/MD' in cmd:
                cmd.remove('/MD')
            return original_spawn(cmd)

        try:
            self.compiler.spawn = spawn
            return original_compile(sources, output_dir, macros,
                                    include_dirs, debug, extra_preargs,
                                    extra_postargs, depends)
        finally:
            self.compiler.spawn = original_spawn

    if self.compiler.compiler_type == 'msvc':
        self.compiler.compile = win_compile
    else:
        self.compiler._compile = unix_compile
    self.compiler.object_filenames = object_filenames
    _build_ext.build_extensions(self)
    # Undo the object_filenames patch once the build is done.
    self.compiler.object_filenames = original_object_filenames
def build_extensions(self):
    """Configure include dirs and compile flags, enable optional Eigen
    sparse / LAPACK support, then run the standard build.

    Environment variables consulted:
        READTHEDOCS    -- "True" skips all optional extras (docs build).
        WITH_SPARSE    -- anything other than "false" defines WITH_SPARSE.
        LAPACK_VARIANT -- numpy build-info section to link against.

    Fixes vs. the previous revision: the LAPACK warning message is now
    actually formatted with the variant name, and the deprecated
    ``logging.warn`` alias is replaced by ``logging.warning``.
    """
    # Add the required Eigen include directory; pass every known include
    # dir to find_eigen() as search hints.
    dirs = self.compiler.include_dirs
    for ext in self.extensions:
        dirs += ext.include_dirs
    include_dirs = []
    eigen_include = find_eigen(hint=dirs)
    if eigen_include is None:
        # Best-effort: build continues, but compilation will likely fail.
        logging.warning("Required library Eigen 3 not found.")
    else:
        include_dirs += [eigen_include]

    # Add the pybind11 include directory (imported lazily: pybind11 may
    # have been installed by this very setup script).
    import pybind11
    include_dirs += [
        pybind11.get_include(False),
        pybind11.get_include(True),
    ]

    for ext in self.extensions:
        ext.include_dirs += include_dirs

    # Set up pybind11 compile options per compiler family.
    ct = self.compiler.compiler_type
    opts = self.c_opts.get(ct, [])
    if ct == 'unix':
        opts.append('-DVERSION_INFO="{0:s}"'
                    .format(self.distribution.get_version()))
        opts.append(cpp_flag(self.compiler))
        if has_flag(self.compiler, '-fvisibility=hidden'):
            opts.append('-fvisibility=hidden')
        # Optional warnings/optimization flags -- only if supported.
        for flag in ["-Wno-unused-function", "-Wno-uninitialized", "-O4"]:
            if has_flag(self.compiler, flag):
                opts.append(flag)
    elif ct == 'msvc':
        opts.append('/DVERSION_INFO=\\"{0:s}\\"'
                    .format(self.distribution.get_version()))
    for ext in self.extensions:
        ext.extra_compile_args = opts

    # Building on RTD doesn't require the extra speedups and it seems to
    # fail for some reason so we'll bail early.
    if os.environ.get("READTHEDOCS", None) == "True":
        _build_ext.build_extensions(self)
        return

    # Enable Eigen/Sparse support unless explicitly disabled.
    with_sparse = os.environ.get("WITH_SPARSE", None)
    if with_sparse is None or with_sparse.lower() != "false":
        for ext in self.extensions:
            ext.define_macros += [("WITH_SPARSE", None)]

    # Link to numpy's LAPACK if available.
    variant = os.environ.get("LAPACK_VARIANT", None)
    if variant is not None and variant.lower() != "none":
        info = get_info(variant)
        if not len(info):
            # Bug fix: the variant name was never interpolated before.
            logging.warning(
                "LAPACK info for variant '{0}' not found".format(variant))
            info = get_info("blas_opt")
        for ext in self.extensions:
            for k, v in info.items():
                try:
                    setattr(ext, k, getattr(ext, k) + v)
                except TypeError:
                    # Attribute isn't list-like; skip it.
                    continue
            ext.define_macros += [
                ("WITH_LAPACK", None),
                ("LAPACK_VARIANT", variant)
            ]

    # Run the standard build procedure.
    _build_ext.build_extensions(self)
def run(self):
    """Configure and build the CGAL bindings with CMake.

    Creates a ``build`` directory, assembles ``cmake`` arguments from
    the user-provided command options (every ``self.<name>`` consulted
    below is an option that may be None), runs the configure and build
    steps, then prepares the output directory for ``build_ext``.
    """
    cwd = pathlib.Path().absolute()
    build_temp = pathlib.Path("build")
    build_temp.mkdir(parents=True, exist_ok=True)
    config = 'Release'  # NOTE(review): assigned but never used below.
    cmake_args = [
        '-DBUILD_JAVA=OFF',
        '-DBUILD_PYTHON=ON',
        '-DCMAKE_BUILD_TYPE=Release',
    ]
    if sys.platform == 'win32' or sys.platform == 'cygwin':
        # NOTE(review): the trailing spaces inside these -D strings look
        # accidental -- confirm cmake tolerates them before cleaning up.
        cmake_args.append('-DCMAKE_DISABLE_FIND_PACKAGE_boost_serialization=TRUE ')
        cmake_args.append('-DCMAKE_DISABLE_FIND_PACKAGE_boost_iostreams=TRUE ')
    # Forward every explicitly supplied path/library option to CMake.
    if self.install_dir is not None:
        cmake_args.append('-DCMAKE_INSTALL_PREFIX='+self.install_dir)
    if self.cgal_dir is not None:
        cmake_args.append('-DCGAL_DIR='+self.cgal_dir)
    if self.boost_dir is not None:
        cmake_args.append('-DBoost_INCLUDE_DIR='+self.boost_dir)
    if self.tbb_include_dir is not None:
        cmake_args.append('-DTBB_INCLUDE_DIR='+self.tbb_include_dir)
    if self.tbb_lib is not None:
        cmake_args.append('-DTBB_LIBRARY_RELEASE='+self.tbb_lib)
    # NOTE(review): the next two options look swapped (malloc include
    # dir feeds TBB_MALLOC_LIBRARY_RELEASE and vice versa) -- verify.
    if self.tbb_malloc_include_dir is not None:
        cmake_args.append('-DTBB_MALLOC_LIBRARY_RELEASE='+self.tbb_malloc_include_dir)
    if self.tbb_malloc_lib is not None:
        cmake_args.append('-DTBB_MALLOC_INCLUDE_DIR='+self.tbb_malloc_lib)
    if self.boost_serialization_lib is not None:
        cmake_args.append('-DBoost_SERIALIZATION_LIBRARY_RELEASE='+self.boost_serialization_lib)
    if self.boost_iostreams_lib is not None:
        cmake_args.append('-DBoost_IOSTREAMS_LIBRARY_RELEASE='+self.boost_iostreams_lib)
    if self.boost_regex_lib is not None:
        cmake_args.append('-DBoost_REGEX_LIBRARY_RELEASE='+self.boost_regex_lib)
    if self.boost_serialization_include_dir is not None:
        cmake_args.append('-DBoost_SERIALIZATION_INCLUDE_DIR='+self.boost_serialization_include_dir)
    if self.boost_iostreams_include_dir is not None:
        cmake_args.append('-DBoost_IOSTREAMS_INCLUDE_DIR='+self.boost_iostreams_include_dir)
    if self.boost_root is not None:
        cmake_args.append('-DBoost_DIR='+self.boost_root)
    if self.laslib_include_dir is not None:
        cmake_args.append('-DLASLIB_INCLUDE_DIR='+self.laslib_include_dir)
    if self.laslib_lib is not None:
        cmake_args.append('-DLASLIB_LIBRARIES='+self.laslib_lib)
    if self.eigen3_dir is not None:
        cmake_args.append('-DEIGEN3_INCLUDE_DIR='+self.eigen3_dir)
    if self.gmp_include_dir is not None:
        cmake_args.append('-DGMP_INCLUDE_DIR='+self.gmp_include_dir)
    if self.gmp_lib is not None:
        cmake_args.append('-DGMP_LIBRARIES='+self.gmp_lib)
    if self.mpfr_include_dir is not None:
        cmake_args.append('-DMPFR_INCLUDE_DIR='+self.mpfr_include_dir)
    if self.mpfr_lib is not None:
        cmake_args.append('-DMPFR_LIBRARIES='+self.mpfr_lib)
    if self.zlib_include_dir is not None:
        cmake_args.append('-DZLIB_INCLUDE_DIR='+self.zlib_include_dir)
    if self.zlib_lib is not None:
        cmake_args.append('-DZLIB_LIBRARIES='+self.zlib_lib)
    if self.cmake_prefix_path is not None:
        cmake_args.append('-DCMAKE_PREFIX_PATH='+self.cmake_prefix_path)
    if self.python_root is not None:
        cmake_args.append('-DPython_ROOT_DIR='+self.python_root)
        # Condition is equivalent to `sys.platform != 'win32'` (the
        # cygwin clause is redundant): point CMake at bin/python on
        # unix-like layouts.
        if sys.platform != 'win32' or sys.platform == 'cygwin':
            cmake_args.append('-DPython_EXECUTABLE='+os.path.join(self.python_root, 'bin', 'python'))
    cmake_args.append('-DINSTALL_FROM_SETUP=ON')
    cmake_args.append('-DBoost_LIB_DIAGNOSTIC_DEFINITIONS=TRUE')
    cmake_args.append('-DBoost_DEBUG=TRUE')
    if sys.platform == 'win32' or sys.platform == 'cygwin':
        build_args = [
            '--config', 'Release'
        ]
    else:
        # Parallel make with 4 jobs on unix generators.
        build_args = [
            '--', '-j4'
        ]
    self.cmake_cmd = 'cmake'
    if self.cmake is not None:
        self.cmake_cmd = self.cmake
    os.chdir(str(build_temp))
    # Force spawn() to actually execute (distutils skips when dry_run
    # is truthy).  NOTE(review): clobbering dry_run is a hack.
    self.dry_run = None
    if self.generator:
        self.spawn([self.cmake_cmd, '-G', self.generator, str(cwd)] + cmake_args)
    else:
        self.spawn([self.cmake_cmd, str(cwd)] + cmake_args)
    self.spawn([self.cmake_cmd, '--build', '.'] + build_args)
    os.chdir(str(cwd))
    # Make sure the package output directory exists before building.
    build_directory = pathlib.Path(os.path.join(self.build_lib, 'CGAL'))
    build_directory.mkdir(parents=True, exist_ok=True)
    build_ext_orig.build_extensions(self)
def build_extensions(self):
    """Build the fitsio extensions against a bundled or system cfitsio.

    When building the bundled cfitsio, reuses (a sanitized copy of) the
    compiler command Python itself was built with, configures and
    compiles cfitsio, and links the resulting static archives into the
    extensions.  Otherwise links against the system libcfitsio.
    """
    if not self.use_system_fitsio:
        # Use the compiler for building python to build cfitsio
        # for maximized compatibility.

        # there is some issue with non-aligned data with optimizations
        # set to '-O3' on some versions of gcc. It appears to be
        # a disagreement between gcc 4 and gcc 5
        CCold = self.compiler.compiler
        CC = []
        for val in CCold:
            if val == '-O3':
                print("replacing '-O3' with '-O2' to address "
                      "gcc bug")
                val = '-O2'

            if val == 'ccache':
                print("removing ccache from the compiler options")
                continue

            CC.append(val)

        self.configure_cfitsio(
            CC=CC,
            ARCHIVE=self.compiler.archiver,
            RANLIB=self.compiler.ranlib,
        )

        # If configure detected bzlib.h, we have to link to libbz2
        with open(os.path.join(self.cfitsio_build_dir, 'Makefile')) as fp:
            _makefile = fp.read()
        if '-DHAVE_BZIP2=1' in _makefile:
            self.compiler.add_library('bz2')
        if '-DCFITSIO_HAVE_CURL=1' in _makefile:
            self.compiler.add_library('curl')

        self.compile_cfitsio()

        # link against the .a library in cfitsio;
        # It should have been a 'static' library of relocatable objects
        # (-fPIC), since we use the python compiler flags
        link_objects = glob.glob(
            os.path.join(self.cfitsio_build_dir, '*.a'))

        self.compiler.set_link_objects(link_objects)

        # Ultimate hack: append the .a files to the dependency list
        # so they will be properly rebuild if cfitsio source is updated.
        for ext in self.extensions:
            ext.depends += link_objects
    else:
        self.compiler.add_library('cfitsio')

        # Check if system cfitsio was compiled with bzip2 and/or curl
        if self.check_system_cfitsio_objects('bzip2'):
            self.compiler.add_library('bz2')
        if self.check_system_cfitsio_objects('curl_'):
            self.compiler.add_library('curl')

        # Make sure the external lib has the fits_use_standard_strings
        # function. If not, then define a macro to tell the wrapper
        # to always return False.
        if not self.check_system_cfitsio_objects(
                '_fits_use_standard_strings'):
            self.compiler.define_macro(
                'FITSIO_PYWRAP_ALWAYS_NONSTANDARD_STRINGS')

    # fitsio requires libm as well.
    self.compiler.add_library('m')

    # call the original build_extensions
    build_ext.build_extensions(self)
def build_extensions(self):
    """Force SSE4.2/BMI2/AVX2 code generation on unix-style compilers,
    then run the standard build."""
    simd_flags = ['-msse4.2', '-mbmi2', '-mavx2']
    if self.compiler.compiler_type == 'unix':
        for extension in self.extensions:
            # NOTE: replaces (rather than extends) any existing flags,
            # matching the original behavior.
            extension.extra_compile_args = list(simd_flags)
    build_ext.build_extensions(self)
def build_extensions(self):
    """Build extensions after stripping the C-only '-Wstrict-prototypes'
    warning flag from the compiler command.

    Robustness fix: the unguarded ``.remove()`` crashed with ValueError
    when the flag was absent, and with AttributeError on compilers (e.g.
    MSVC) that have no ``compiler_so`` list.  Removal is now best-effort,
    matching the other build_extensions implementations in this file.
    """
    try:
        self.compiler.compiler_so.remove('-Wstrict-prototypes')
    except (AttributeError, ValueError):
        # Flag or attribute not present -- nothing to strip.
        pass
    orig_build_ext.build_extensions(self)
def build_extensions(self):
    """Probe the system for optional imaging libraries and build.

    Works in four phases: (1) collect candidate library/include
    directories from configured roots, pkg-config, environment
    variables and per-platform conventions; (2) detect each optional
    feature (zlib, jpeg, jpeg2000, tiff, freetype/raqm, lcms, webp,
    xcb, ...); (3) translate the findings into libraries and macros on
    the extension objects; (4) run the stock build and print a summary.
    """
    library_dirs = []
    include_dirs = []

    pkg_config = None
    if _cmd_exists(os.environ.get("PKG_CONFIG", "pkg-config")):
        pkg_config = _pkg_config

    #
    # add configured kits
    for root_name, lib_name in dict(
        JPEG_ROOT="libjpeg",
        JPEG2K_ROOT="libopenjp2",
        TIFF_ROOT=("libtiff-5", "libtiff-4"),
        ZLIB_ROOT="zlib",
        FREETYPE_ROOT="freetype2",
        HARFBUZZ_ROOT="harfbuzz",
        FRIBIDI_ROOT="fribidi",
        LCMS_ROOT="lcms2",
        IMAGEQUANT_ROOT="libimagequant",
    ).items():
        # Module-level *_ROOT overrides win; then the same-named env
        # var; then pkg-config.
        root = globals()[root_name]

        if root is None and root_name in os.environ:
            prefix = os.environ[root_name]
            root = (os.path.join(prefix, "lib"), os.path.join(prefix, "include"))

        if root is None and pkg_config:
            if isinstance(lib_name, tuple):
                for lib_name2 in lib_name:
                    _dbg(f"Looking for `{lib_name2}` using pkg-config.")
                    root = pkg_config(lib_name2)
                    if root:
                        break
            else:
                _dbg(f"Looking for `{lib_name}` using pkg-config.")
                root = pkg_config(lib_name)

        if isinstance(root, tuple):
            lib_root, include_root = root
        else:
            lib_root = include_root = root

        _add_directory(library_dirs, lib_root)
        _add_directory(include_dirs, include_root)

    # respect CFLAGS/CPPFLAGS/LDFLAGS
    for k in ("CFLAGS", "CPPFLAGS", "LDFLAGS"):
        if k in os.environ:
            for match in re.finditer(r"-I([^\s]+)", os.environ[k]):
                _add_directory(include_dirs, match.group(1))
            for match in re.finditer(r"-L([^\s]+)", os.environ[k]):
                _add_directory(library_dirs, match.group(1))

    # include, rpath, if set as environment variables:
    for k in ("C_INCLUDE_PATH", "CPATH", "INCLUDE"):
        if k in os.environ:
            for d in os.environ[k].split(os.path.pathsep):
                _add_directory(include_dirs, d)

    for k in ("LD_RUN_PATH", "LIBRARY_PATH", "LIB"):
        if k in os.environ:
            for d in os.environ[k].split(os.path.pathsep):
                _add_directory(library_dirs, d)

    _add_directory(library_dirs, os.path.join(sys.prefix, "lib"))
    _add_directory(include_dirs, os.path.join(sys.prefix, "include"))

    #
    # add platform directories

    if self.disable_platform_guessing:
        pass
    elif sys.platform == "cygwin":
        # pythonX.Y.dll.a is in the /usr/lib/pythonX.Y/config directory
        _add_directory(
            library_dirs,
            os.path.join(
                "/usr/lib", "python{}.{}".format(*sys.version_info), "config"
            ),
        )
    elif sys.platform == "darwin":
        # attempt to make sure we pick freetype2 over other versions
        _add_directory(include_dirs, "/sw/include/freetype2")
        _add_directory(include_dirs, "/sw/lib/freetype2/include")
        # fink installation directories
        _add_directory(library_dirs, "/sw/lib")
        _add_directory(include_dirs, "/sw/include")
        # darwin ports installation directories
        _add_directory(library_dirs, "/opt/local/lib")
        _add_directory(include_dirs, "/opt/local/include")

        # if Homebrew is installed, use its lib and include directories
        try:
            prefix = (
                subprocess.check_output(["brew", "--prefix"])
                .strip()
                .decode("latin1")
            )
        except Exception:
            # Homebrew not installed
            prefix = None

        ft_prefix = None

        if prefix:
            # add Homebrew's include and lib directories
            _add_directory(library_dirs, os.path.join(prefix, "lib"))
            _add_directory(include_dirs, os.path.join(prefix, "include"))
            _add_directory(
                include_dirs, os.path.join(prefix, "opt", "zlib", "include")
            )
            ft_prefix = os.path.join(prefix, "opt", "freetype")

        if ft_prefix and os.path.isdir(ft_prefix):
            # freetype might not be linked into Homebrew's prefix
            _add_directory(library_dirs, os.path.join(ft_prefix, "lib"))
            _add_directory(include_dirs, os.path.join(ft_prefix, "include"))
        else:
            # fall back to freetype from XQuartz if
            # Homebrew's freetype is missing
            _add_directory(library_dirs, "/usr/X11/lib")
            _add_directory(include_dirs, "/usr/X11/include")

        sdk_path = self.get_macos_sdk_path()
        if sdk_path:
            _add_directory(library_dirs, os.path.join(sdk_path, "usr", "lib"))
            _add_directory(include_dirs, os.path.join(sdk_path, "usr", "include"))
    elif (
        sys.platform.startswith("linux")
        or sys.platform.startswith("gnu")
        or sys.platform.startswith("freebsd")
    ):
        for dirname in _find_library_dirs_ldconfig():
            _add_directory(library_dirs, dirname)
        if sys.platform.startswith("linux") and os.environ.get(
            "ANDROID_ROOT", None
        ):
            # termux support for android.
            # system libraries (zlib) are installed in /system/lib
            # headers are at $PREFIX/include
            # user libs are at $PREFIX/lib
            _add_directory(
                library_dirs,
                os.path.join(
                    os.environ["ANDROID_ROOT"],
                    "lib" if struct.calcsize("l") == 4 else "lib64",
                ),
            )
    elif sys.platform.startswith("netbsd"):
        _add_directory(library_dirs, "/usr/pkg/lib")
        _add_directory(include_dirs, "/usr/pkg/include")
    elif sys.platform.startswith("sunos5"):
        _add_directory(library_dirs, "/opt/local/lib")
        _add_directory(include_dirs, "/opt/local/include")

    # FIXME: check /opt/stuff directories here?

    # standard locations
    if not self.disable_platform_guessing:
        _add_directory(library_dirs, "/usr/local/lib")
        _add_directory(include_dirs, "/usr/local/include")

        _add_directory(library_dirs, "/usr/lib")
        _add_directory(include_dirs, "/usr/include")
        # alpine, at least
        _add_directory(library_dirs, "/lib")

    if sys.platform == "win32":
        # on Windows, look for the OpenJPEG libraries in the location that
        # the official installer puts them
        program_files = os.environ.get("ProgramFiles", "")
        best_version = (0, 0)
        best_path = None
        for name in os.listdir(program_files):
            if name.startswith("OpenJPEG "):
                version = tuple(int(x) for x in name[9:].strip().split("."))
                if version > best_version:
                    best_version = version
                    best_path = os.path.join(program_files, name)

        if best_path:
            _dbg("Adding %s to search list", best_path)
            _add_directory(library_dirs, os.path.join(best_path, "lib"))
            _add_directory(include_dirs, os.path.join(best_path, "include"))

    #
    # insert new dirs *before* default libs, to avoid conflicts
    # between Python PYD stub libs and real libraries

    self.compiler.library_dirs = library_dirs + self.compiler.library_dirs
    self.compiler.include_dirs = include_dirs + self.compiler.include_dirs

    #
    # look for available libraries

    feature = self.feature

    if feature.want("zlib"):
        _dbg("Looking for zlib")
        if _find_include_file(self, "zlib.h"):
            if _find_library_file(self, "z"):
                feature.zlib = "z"
            elif sys.platform == "win32" and _find_library_file(self, "zlib"):
                feature.zlib = "zlib"  # alternative name

    if feature.want("jpeg"):
        _dbg("Looking for jpeg")
        if _find_include_file(self, "jpeglib.h"):
            if _find_library_file(self, "jpeg"):
                feature.jpeg = "jpeg"
            elif sys.platform == "win32" and _find_library_file(self, "libjpeg"):
                feature.jpeg = "libjpeg"  # alternative name

    feature.openjpeg_version = None
    if feature.want("jpeg2000"):
        _dbg("Looking for jpeg2000")
        best_version = None
        best_path = None

        # Find the best version
        for directory in self.compiler.include_dirs:
            _dbg("Checking for openjpeg-#.# in %s", directory)
            try:
                listdir = os.listdir(directory)
            except Exception:
                # OSError, FileNotFoundError
                continue
            for name in listdir:
                if name.startswith("openjpeg-") and os.path.isfile(
                    os.path.join(directory, name, "openjpeg.h")
                ):
                    _dbg("Found openjpeg.h in %s/%s", (directory, name))
                    version = tuple(int(x) for x in name[9:].split("."))
                    if best_version is None or version > best_version:
                        best_version = version
                        best_path = os.path.join(directory, name)
                        _dbg(
                            "Best openjpeg version %s so far in %s",
                            (best_version, best_path),
                        )

        if best_version and _find_library_file(self, "openjp2"):
            # Add the directory to the include path so we can include
            # <openjpeg.h> rather than having to cope with the versioned
            # include path
            # FIXME (melvyn-sopacua):
            # At this point it's possible that best_path is already in
            # self.compiler.include_dirs. Should investigate how that is
            # possible.
            _add_directory(self.compiler.include_dirs, best_path, 0)
            feature.jpeg2000 = "openjp2"
            feature.openjpeg_version = ".".join(str(x) for x in best_version)

    if feature.want("imagequant"):
        _dbg("Looking for imagequant")
        if _find_include_file(self, "libimagequant.h"):
            if _find_library_file(self, "imagequant"):
                feature.imagequant = "imagequant"
            elif _find_library_file(self, "libimagequant"):
                feature.imagequant = "libimagequant"

    if feature.want("tiff"):
        _dbg("Looking for tiff")
        if _find_include_file(self, "tiff.h"):
            if _find_library_file(self, "tiff"):
                feature.tiff = "tiff"
            if sys.platform in ["win32", "darwin"] and _find_library_file(
                self, "libtiff"
            ):
                feature.tiff = "libtiff"

    if feature.want("freetype"):
        _dbg("Looking for freetype")
        if _find_library_file(self, "freetype"):
            # look for freetype2 include files
            freetype_version = 0
            for subdir in self.compiler.include_dirs:
                # Check both <subdir>/ft2build.h and
                # <subdir>/freetype2/ft2build.h.
                _dbg("Checking for include file %s in %s", ("ft2build.h", subdir))
                if os.path.isfile(os.path.join(subdir, "ft2build.h")):
                    _dbg("Found %s in %s", ("ft2build.h", subdir))
                    freetype_version = 21
                    subdir = os.path.join(subdir, "freetype2")
                    break
                subdir = os.path.join(subdir, "freetype2")
                _dbg("Checking for include file %s in %s", ("ft2build.h", subdir))
                if os.path.isfile(os.path.join(subdir, "ft2build.h")):
                    _dbg("Found %s in %s", ("ft2build.h", subdir))
                    freetype_version = 21
                    break
            if freetype_version:
                feature.freetype = "freetype"
                if subdir:
                    _add_directory(self.compiler.include_dirs, subdir, 0)

    if feature.freetype and feature.want("raqm"):
        if not feature.want_vendor("raqm"):  # want system Raqm
            _dbg("Looking for Raqm")
            if _find_include_file(self, "raqm.h"):
                if _find_library_file(self, "raqm"):
                    feature.raqm = "raqm"
                elif _find_library_file(self, "libraqm"):
                    feature.raqm = "libraqm"
        else:  # want to build Raqm from src/thirdparty
            _dbg("Looking for HarfBuzz")
            feature.harfbuzz = None
            hb_dir = _find_include_dir(self, "harfbuzz", "hb.h")
            if hb_dir:
                if isinstance(hb_dir, str):
                    _add_directory(self.compiler.include_dirs, hb_dir, 0)
                if _find_library_file(self, "harfbuzz"):
                    feature.harfbuzz = "harfbuzz"
            if feature.harfbuzz:
                if not feature.want_vendor("fribidi"):  # want system FriBiDi
                    _dbg("Looking for FriBiDi")
                    feature.fribidi = None
                    fribidi_dir = _find_include_dir(self, "fribidi", "fribidi.h")
                    if fribidi_dir:
                        if isinstance(fribidi_dir, str):
                            _add_directory(
                                self.compiler.include_dirs, fribidi_dir, 0
                            )
                        if _find_library_file(self, "fribidi"):
                            feature.fribidi = "fribidi"
                            feature.raqm = True
                else:  # want to build FriBiDi shim from src/thirdparty
                    feature.raqm = True

    if feature.want("lcms"):
        _dbg("Looking for lcms")
        if _find_include_file(self, "lcms2.h"):
            if _find_library_file(self, "lcms2"):
                feature.lcms = "lcms2"
            elif _find_library_file(self, "lcms2_static"):
                # alternate Windows name.
                feature.lcms = "lcms2_static"

    if feature.want("webp"):
        _dbg("Looking for webp")
        if _find_include_file(self, "webp/encode.h") and _find_include_file(
            self, "webp/decode.h"
        ):
            # In Google's precompiled zip it is call "libwebp":
            if _find_library_file(self, "webp"):
                feature.webp = "webp"
            elif _find_library_file(self, "libwebp"):
                feature.webp = "libwebp"

    if feature.want("webpmux"):
        _dbg("Looking for webpmux")
        if _find_include_file(self, "webp/mux.h") and _find_include_file(
            self, "webp/demux.h"
        ):
            if _find_library_file(self, "webpmux") and _find_library_file(
                self, "webpdemux"
            ):
                feature.webpmux = "webpmux"
            if _find_library_file(self, "libwebpmux") and _find_library_file(
                self, "libwebpdemux"
            ):
                feature.webpmux = "libwebpmux"

    if feature.want("xcb"):
        _dbg("Looking for xcb")
        if _find_include_file(self, "xcb/xcb.h"):
            if _find_library_file(self, "xcb"):
                feature.xcb = "xcb"

    # A missing feature that was explicitly required aborts the build.
    for f in feature:
        if not getattr(feature, f) and feature.require(f):
            if f in ("jpeg", "zlib"):
                raise RequiredDependencyException(f)
            raise DependencyException(f)

    #
    # core library

    libs = self.add_imaging_libs.split()
    defs = []
    if feature.jpeg:
        libs.append(feature.jpeg)
        defs.append(("HAVE_LIBJPEG", None))
    if feature.jpeg2000:
        libs.append(feature.jpeg2000)
        defs.append(("HAVE_OPENJPEG", None))
        if sys.platform == "win32" and not PLATFORM_MINGW:
            defs.append(("OPJ_STATIC", None))
    if feature.zlib:
        libs.append(feature.zlib)
        defs.append(("HAVE_LIBZ", None))
    if feature.imagequant:
        libs.append(feature.imagequant)
        defs.append(("HAVE_LIBIMAGEQUANT", None))
    if feature.tiff:
        libs.append(feature.tiff)
        defs.append(("HAVE_LIBTIFF", None))
        if sys.platform == "win32":
            # This define needs to be defined if-and-only-if it was defined
            # when compiling LibTIFF. LibTIFF doesn't expose it in `tiffconf.h`,
            # so we have to guess; by default it is defined in all Windows builds.
            # See #4237, #5243, #5359 for more information.
            defs.append(("USE_WIN32_FILEIO", None))
    if feature.xcb:
        libs.append(feature.xcb)
        defs.append(("HAVE_XCB", None))
    if sys.platform == "win32":
        libs.extend(["kernel32", "user32", "gdi32"])
    # Runtime endianness check.
    if struct.unpack("h", b"\0\1")[0] == 1:
        defs.append(("WORDS_BIGENDIAN", None))

    if (
        sys.platform == "win32"
        and sys.version_info < (3, 9)
        and not (PLATFORM_PYPY or PLATFORM_MINGW)
    ):
        defs.append(("PILLOW_VERSION", f'"\\"{PILLOW_VERSION}\\""'))
    else:
        defs.append(("PILLOW_VERSION", f'"{PILLOW_VERSION}"'))

    self._update_extension("PIL._imaging", libs, defs)

    #
    # additional libraries

    if feature.freetype:
        srcs = []
        libs = ["freetype"]
        defs = []
        if feature.raqm:
            if not feature.want_vendor("raqm"):  # using system Raqm
                defs.append(("HAVE_RAQM", None))
                defs.append(("HAVE_RAQM_SYSTEM", None))
                libs.append(feature.raqm)
            else:  # building Raqm from src/thirdparty
                defs.append(("HAVE_RAQM", None))
                srcs.append("src/thirdparty/raqm/raqm.c")
                libs.append(feature.harfbuzz)
                if not feature.want_vendor("fribidi"):  # using system FriBiDi
                    defs.append(("HAVE_FRIBIDI_SYSTEM", None))
                    libs.append(feature.fribidi)
                else:  # building FriBiDi shim from src/thirdparty
                    srcs.append("src/thirdparty/fribidi-shim/fribidi.c")
        self._update_extension("PIL._imagingft", libs, defs, srcs)
    else:
        self._remove_extension("PIL._imagingft")

    if feature.lcms:
        extra = []
        if sys.platform == "win32":
            extra.extend(["user32", "gdi32"])
        self._update_extension("PIL._imagingcms", [feature.lcms] + extra)
    else:
        self._remove_extension("PIL._imagingcms")

    if feature.webp:
        libs = [feature.webp]
        defs = []

        if feature.webpmux:
            defs.append(("HAVE_WEBPMUX", None))
            libs.append(feature.webpmux)
            libs.append(feature.webpmux.replace("pmux", "pdemux"))

        self._update_extension("PIL._webp", libs, defs)
    else:
        self._remove_extension("PIL._webp")

    tk_libs = ["psapi"] if sys.platform in ("win32", "cygwin") else []
    self._update_extension("PIL._imagingtk", tk_libs)

    build_ext.build_extensions(self)

    #
    # sanity checks

    self.summary_report(feature)
def build_extensions(self):
    """Run patch_extension on every extension, then delegate to the
    stock distutils build."""
    for extension in self.extensions:
        self.patch_extension(extension)
    build_ext.build_extensions(self)