def get_cxxflags():
    """Return the C++ flag list matching this platform's default compiler.

    Raises UnsupportedCompilerException for unsupported systems/compilers.
    """
    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler
    from distutils import sysconfig
    from platform import system

    # Remap the generic compiler names to platform-specific flag sets.
    if system() == DARWIN_KEY:
        CXX_FLAGS["gcc"] = CXX_FLAGS["gcc-mac"]
        CXX_FLAGS["cc"] = CXX_FLAGS["clang"]
        CXX_FLAGS["c++"] = CXX_FLAGS["clang"]
    elif system() == LINUX_KEY:
        CXX_FLAGS["gcc"] = CXX_FLAGS["gcc-linux"]
        CXX_FLAGS["cc"] = CXX_FLAGS["gcc"]
        CXX_FLAGS["c++"] = CXX_FLAGS["gcc"]
    else:
        raise UnsupportedCompilerException("System: %s is not supported by HOPE" % system())

    sysconfig.get_config_vars()  # force distutils to initialize its config vars
    compiler = new_compiler()
    customize_compiler(compiler)

    compiler_name = compiler.compiler[0].split("/")[-1]
    _check_version(compiler_name, compiler.compiler[0])

    # Match the detected executable name against the known flag table.
    for name, flags in CXX_FLAGS.items():
        if compiler_name.startswith(name):
            return flags
    raise UnsupportedCompilerException("Unknown compiler: {0}".format(compiler_name))
def find_python_library():
    """Return the full path to the Python library (empty string if not found)."""
    pyver = sysconfig.get_python_version()
    # Candidate library basenames, most specific ABI variants first.
    candidate_names = [
        "python%s" % pyver.replace(".", ""),
        "python%smu" % pyver,
        "python%sm" % pyver,
        "python%su" % pyver,
        "python%s" % pyver,
    ]
    # Directories to probe, configured locations before the usual fallbacks.
    search_dirs = [
        "%s/lib" % os.environ.get("PYTHON_DIR", ""),
        "%s" % sysconfig.get_config_vars().get("LIBDIR", ""),
        "/usr/lib/%s" % sysconfig.get_config_vars().get("MULTIARCH", ""),
        "/usr/local/lib",
        "/opt/local/lib",
        "/usr/lib",
        "/usr/lib64",
    ]
    cc = new_compiler()
    for name in candidate_names:
        found = cc.find_library_file(search_dirs, name)
        if found is not None:
            return found
    return ""
def finalize_options(self):
    """Inherit unset options from 'build' and compute executable link args."""
    temp_was_unset = self.build_temp is None
    self.set_undefined_options('build',
                               ('build_scripts', 'build_dir'),
                               ('build_temp', 'build_temp'),
                               ('compiler', 'compiler'),
                               ('debug', 'debug'),
                               ('force', 'force'))
    if temp_was_unset:
        # Keep script objects separate from the shared build temp dir.
        self.build_temp = os.path.join(self.build_temp, 'scripts')
    self.scripts = self.distribution.scripts or []

    # Get the linker arguments for building executables (POSIX only).
    if os.name == 'posix':
        pre = sysconfig.get_config_vars('LDFLAGS', 'LINKFORSHARED')
        self.link_preargs = ' '.join(pre).split()
        post = sysconfig.get_config_vars('LIBS', 'MODLIBS', 'SYSLIBS', 'LDLAST')
        self.link_postargs = ' '.join(post).split()
    else:
        self.link_preargs = []
        self.link_postargs = []

    # Executable suffix; debug builds on Windows get a '_d' marker.
    self.exe_extension = sysconfig.get_config_var('EXE') or ''
    if self.debug and os.name == 'nt':
        self.exe_extension = '_d' + self.exe_extension
def test_customize_compiler(self):
    """customize_compiler() must honor env overrides, then sysconfig defaults."""
    # Make sure that sysconfig._config_vars is initialized.
    sysconfig.get_config_vars()

    env_overrides = {
        'AR': 'env_ar',
        'CC': 'env_cc',
        'CPP': 'env_cpp',
        'CXX': 'env_cxx --env-cxx-flags',
        'LDSHARED': 'env_ldshared',
        'LDFLAGS': '--env-ldflags',
        'ARFLAGS': '--env-arflags',
        'CFLAGS': '--env-cflags',
        'CPPFLAGS': '--env-cppflags',
    }
    os.environ.update(env_overrides)

    # With the environment set, every executable should reflect the overrides.
    comp = self.customize_compiler()
    self.assertEqual(comp.exes['archiver'], 'env_ar --env-arflags')
    self.assertEqual(comp.exes['preprocessor'], 'env_cpp --env-cppflags')
    self.assertEqual(comp.exes['compiler'],
                     'env_cc --sc-cflags --env-cflags --env-cppflags')
    self.assertEqual(comp.exes['compiler_so'],
                     ('env_cc --sc-cflags '
                      '--env-cflags ''--env-cppflags --sc-ccshared'))
    self.assertEqual(comp.exes['compiler_cxx'], 'env_cxx --env-cxx-flags')
    self.assertEqual(comp.exes['linker_exe'], 'env_cc')
    self.assertEqual(comp.exes['linker_so'],
                     ('env_ldshared --env-ldflags --env-cflags'
                      ' --env-cppflags'))
    self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix')

    # With the environment cleared, sysconfig values must win.
    for key in env_overrides:
        del os.environ[key]

    comp = self.customize_compiler()
    self.assertEqual(comp.exes['archiver'], 'sc_ar --sc-arflags')
    self.assertEqual(comp.exes['preprocessor'], 'sc_cc -E')
    self.assertEqual(comp.exes['compiler'], 'sc_cc --sc-cflags')
    self.assertEqual(comp.exes['compiler_so'], 'sc_cc --sc-cflags --sc-ccshared')
    self.assertEqual(comp.exes['compiler_cxx'], 'sc_cxx')
    self.assertEqual(comp.exes['linker_exe'], 'sc_cc')
    self.assertEqual(comp.exes['linker_so'], 'sc_ldshared')
    self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix')
def get_venv_basedir():
    """Returns the base directory of the virtualenv, useful to read configuration and plugins.

    Raises:
        EnvironmentError: when not running inside a virtualenv, i.e. when
            'sys.real_prefix' (set by the classic virtualenv tool) is absent,
            or when exec_prefix still lives under the real prefix.
    """
    exec_prefix = get_config_vars()['exec_prefix']
    # Idiomatic truth test instead of 'hasattr(...) is False'; short-circuits
    # before touching sys.real_prefix when it does not exist.
    if not hasattr(sys, 'real_prefix') or exec_prefix.startswith(sys.real_prefix):
        raise EnvironmentError('You must be in a virtual environment')
    # The virtualenv root is one level above exec_prefix.
    return os.path.abspath(exec_prefix + '/../')
def ask_supports_thread():
    """Probe whether the C compiler accepts '__thread'; register USE__THREAD if so."""
    from distutils.core import Distribution
    from distutils.sysconfig import get_config_vars
    # Workaround for a bug of distutils, e.g. on OS/X: config vars must be
    # initialized before the config command runs.
    get_config_vars()
    config = Distribution().get_command_obj('config')
    if config.try_compile('__thread int some_threadlocal_variable_42;'):
        define_macros.append(('USE__THREAD', None))
    else:
        sys.stderr.write("Note: will not use '__thread' in the C code\n")
        sys.stderr.write("The above error message can be safely ignored\n")
def inspect_cuda():
    """ Return cuda device information and nvcc/cuda setup """
    nvcc_settings = nvcc_compiler_settings()
    sysconfig.get_config_vars()  # initialize distutils config vars
    compiler = ccompiler.new_compiler()
    sysconfig.customize_compiler(compiler)
    customize_compiler_for_nvcc(compiler, nvcc_settings)

    raw_output = inspect_cuda_version_and_devices(compiler, nvcc_settings)
    return json.loads(raw_output), nvcc_settings
def remove_from_cflags(flags):
    """Strip each flag in *flags* from distutils' OPT/CFLAGS vars (no-op on Windows)."""
    if windows_check():
        return
    # Python 2.5 has no usable OPT config var here.
    keys = ["CFLAGS"] if python_version == '2.5' else ["OPT", "CFLAGS"]
    for key in keys:
        value = sysconfig.get_config_vars()[key]
        for flag in flags:
            value = value.replace(flag, " ")
        # Re-join to collapse the whitespace left behind by removals.
        sysconfig.get_config_vars()[key] = " ".join(value.split())
def __init__(self):
    """Snapshot the distutils compiler-flag config vars for later use."""
    # The config vars that carry compiler flags.
    self.flags = (
        'OPT',
        'CFLAGS',
        'CPPFLAGS',
        'EXTRA_CFLAGS',
        'BASECFLAGS',
        'PY_CFLAGS',
    )
    # Current values of exactly those flags, in the same order as self.flags.
    self.state = sysconfig.get_config_vars(*self.flags)
    # Reference to the live (mutable) config-vars dictionary itself.
    self.config_vars = sysconfig.get_config_vars()
def _linker_vars(file_exts):
    """Choose the link command and extra runtime libs for the given source extensions."""
    exts = set(file_exts)
    runtime_libs = ""
    uses_cxx = 'cpp' in exts
    # C++ sources need the C++ shared-link driver instead of the C one.
    if uses_cxx:
        linking_compiler = get_config_vars("LDCXXSHARED")[0]
    else:
        linking_compiler = get_config_vars("LDSHARED")[0]
    if 'f90' in exts:
        if uses_cxx:
            # Mixed Fortran/C++ links need the C and C++ runtimes explicitly.
            runtime_libs = "-lc -lstdc++"
        # Swap in the Fortran driver, keeping the original link flags.
        linking_compiler = " ".join([compiler['f90']] + linking_compiler.split()[1:])
    return (linking_compiler, runtime_libs)
def run(self) :
    # Debug-dump the distribution's state, then remove the installed scripts.
    #print self.distribution.find_config_files()
    print self.distribution.command_options
    self.distribution.parse_config_files()
    print self.prefix
    print self.get_sub_commands()
    print self.__dict__
    print get_config_vars('prefix')
    # delete scripts: remove each installed script by its basename
    for script in self.distribution.scripts :
        self.remove_path(os.path.basename(script))
def get_compiler_command():
    """
    Returns the name of the executable used by the default compiler on the
    system used by distutils to build C extensions.
    """
    get_config_vars()  # initialize distutils config vars
    compiler = new_compiler()
    customize_compiler(compiler)
    if not isinstance(compiler, MSVCCompiler):
        return compiler.compiler[0]
    compiler.initialize()
    # Normally the MSVC compiler path must be quoted as it contains spaces.
    return '"%s"' % compiler.cc
def _fixup_compiler():
    # Pick a working C compiler for this build: honor an explicit $CC,
    # reject llvm-gcc (known to generate bad code), fall back to clang,
    # and rewrite the compiler-related config vars when we switch.
    if 'CC' in os.environ:
        # CC is in the environment, always use explicit
        # overrides.
        return

    try:
        # Newer version of python have support for dealing with
        # the compiler mess w.r.t. various versions of Apple's SDKs
        import _osx_support
        _osx_support.customize_compiler(get_config_vars())
    except (ImportError, AttributeError, KeyError):
        pass

    # First word of the configured CC command is the compiler executable.
    cc = oldcc = get_config_var('CC').split()[0]
    cc = _find_executable(cc)
    if cc is not None and os.path.basename(cc).startswith('gcc'):
        # Check if compiler is LLVM-GCC, that's known to
        # generate bad code.
        data = os.popen("'%s' --version 2>/dev/null" % (
            cc.replace("'", "'\"'\"'"),)).read()
        if 'llvm-gcc' in data:
            cc = None

    if cc is not None and not _working_compiler(cc):
        cc = None

    if cc is None:
        # Default compiler is not useable, try finding 'clang'
        cc = _find_executable('clang')
        if cc is None:
            # Last resort: ask Xcode's xcrun where clang lives.
            cc = os.popen("/usr/bin/xcrun -find clang").read()

    if not cc:
        raise DistutilsPlatformError("Cannot locate compiler candidate")

    if not _working_compiler(cc):
        raise DistutilsPlatformError("Cannot locate a working compiler")

    if cc != oldcc:
        log.info("Use '%s' instead of '%s' as the compiler" % (cc, oldcc))
        # Rewrite every compiler-bearing config var not already overridden
        # in the environment; CXX gets the C++ driver variant ('<cc>++').
        vars = get_config_vars()
        for env in ('BLDSHARED', 'LDSHARED', 'CC', 'CXX'):
            if env in vars and env not in os.environ:
                split = vars[env].split()
                split[0] = cc if env != 'CXX' else cc + '++'
                vars[env] = ' '.join(split)
def initialize_options(self, *args):
    """Drop '-Wstrict-prototypes' from $OPT before build_ext initializes."""
    from distutils.sysconfig import get_config_vars
    (opt,) = get_config_vars('OPT')
    if opt:
        kept_flags = [flag for flag in opt.split()
                      if flag and flag != '-Wstrict-prototypes']
        os.environ['OPT'] = ' '.join(kept_flags)
    build_ext.initialize_options(self)
def configure_pyexe(exe, config_cmd):
    # Configure an executable target *exe* with the libraries, library
    # dirs, and extra link flags needed to embed this Python interpreter.
    from distutils import sysconfig
    if sys.platform.startswith('win'):
        return
    # Anaconda's macOS Pythons always need an explicit libpython.
    if (sys.platform == 'darwin' and
            ('Anaconda' in sys.version or
             'Continuum Analytics' in sys.version)):
        py_version = sysconfig.get_python_version()
        py_abiflags = getattr(sys, 'abiflags', '')
        exe.libraries += ['python' + py_version + py_abiflags]
        return
    #
    from distutils.util import split_quoted
    cfg_vars = sysconfig.get_config_vars()
    libraries = []
    library_dirs = []
    link_args = []
    if not sysconfig.get_config_var('Py_ENABLE_SHARED'):
        # Static libpython: link against it explicitly.
        py_version = sysconfig.get_python_version()
        py_abiflags = getattr(sys, 'abiflags', '')
        libraries = ['python' + py_version + py_abiflags]
        if sys.platform == 'darwin':
            # Framework builds are linked via LINKFORSHARED instead.
            fwkdir = cfg_vars.get('PYTHONFRAMEWORKDIR')
            if (fwkdir and fwkdir != 'no-framework' and
                    fwkdir in cfg_vars.get('LINKFORSHARED', '')):
                del libraries[:]
    for var in ('LIBDIR', 'LIBPL'):
        library_dirs += split_quoted(cfg_vars.get(var, ''))
    for var in ('LDFLAGS', 'LIBS', 'MODLIBS', 'SYSLIBS', 'LDLAST'):
        link_args += split_quoted(cfg_vars.get(var, ''))
    exe.libraries += libraries
    exe.library_dirs += library_dirs
    exe.extra_link_args += link_args
def test_version_int(self):
    """build_scripts must tolerate a non-string sysconfig VERSION (issue #4524)."""
    source = self.mkdtemp()
    target = self.mkdtemp()
    expected = self.write_sample_scripts(source)
    cmd = self.get_build_scripts_cmd(target,
                                     [os.path.join(source, fn)
                                      for fn in expected])
    cmd.finalize_options()

    # http://bugs.python.org/issue4524
    #
    # On linux-g++-32 with command line `./configure --enable-ipv6
    # --with-suffix=3`, python is compiled okay but the build scripts
    # failed when writing the name of the executable
    old = sysconfig.get_config_vars().get('VERSION')
    sysconfig._config_vars['VERSION'] = 4
    try:
        cmd.run()
    finally:
        # Restore the previous value; if there was none, REMOVE our fake
        # entry instead of leaking VERSION=4 into later tests (the original
        # only restored when old was not None).
        if old is not None:
            sysconfig._config_vars['VERSION'] = old
        else:
            sysconfig._config_vars.pop('VERSION', None)

    built = os.listdir(target)
    for name in expected:
        self.assert_(name in built)
def setUp(self):
    """Set up per-test logging and host/thread options for the stress test."""
    if "agent_remote_stress_test" not in config:
        raise SkipTest()

    # Log to <virtualenv>/logs/<test id>.log
    virtualenv_home = sysconfig.get_config_vars("exec_prefix")[0]
    log_path = os.path.join(virtualenv_home, "logs")
    mkdir_p(log_path)
    log_file = os.path.join(log_path, self.id() + ".log")
    logger = logging.getLogger()
    handler = logging.FileHandler(log_file, mode='w', encoding=None,
                                  delay=False)
    logger.addHandler(handler)
    logging.info("PY_STRESS_START:%d" % int(time.time() * 1000))

    options = config["agent_remote_stress_test"]
    # Either a single host or a file with one host per line.
    if "host" in options:
        self.hosts = [options["host"]]
    elif "hostfile" in options:
        with open(options["hostfile"]) as f:
            self.hosts = f.read().splitlines()

    self.vms_per_thread = int(options.get(
        "vms_per_thread", self.DEFAULT_VMS_PER_THREAD))
    self.threads_per_host = int(options.get(
        "threads_per_host", self.DEFAULT_THREADS_PER_HOST))
    self.place_to_create_ratio = int(options.get(
        "place_to_create_ratio", self.DEFAULT_PLACE_TO_CREATE_RATIO))
    self.clear()
def customize_compiler2(compiler): (cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \ sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED', 'SO') if 0: print "cc=", cc print "cxx=", cxx print "opt=", opt print "cflags=", cflags print "ccshared=", ccshared cflags = cflags.replace("-DNDEBUG", "") cflags = cflags.replace("-O2", "") cpp = cc + " -E" cc_cmd = cc + ' ' + cflags compiler.set_executables( preprocessor=cpp, compiler=cc_cmd, compiler_so=cc_cmd + ' ' + ccshared, compiler_cxx=cxx, linker_so=ldshared, linker_exe=cc) compiler.shared_lib_extension = so_ext return
def test_ext_fullpath(self):
    """get_ext_fullpath() must honor inplace, package_dir and build_lib."""
    ext_suffix = sysconfig.get_config_vars()['SO']
    dist = Distribution()
    cmd = build_ext(dist)
    cmd.inplace = 1
    cmd.distribution.package_dir = {'': 'src'}
    cmd.distribution.packages = ['lxml', 'lxml.html']
    curdir = os.getcwd()

    # Inplace build lands under the package_dir.
    expected = os.path.join(curdir, 'src', 'lxml', 'etree' + ext_suffix)
    self.assertEqual(expected, cmd.get_ext_fullpath('lxml.etree'))

    # Regular build lands under build_lib.
    cmd.inplace = 0
    cmd.build_lib = os.path.join(curdir, 'tmpdir')
    expected = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext_suffix)
    self.assertEqual(expected, cmd.get_ext_fullpath('lxml.etree'))

    # Dotted package without a package_dir mapping, not inplace.
    build_py = cmd.get_finalized_command('build_py')
    build_py.package_dir = {}
    cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
    expected = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
                            'portmap' + ext_suffix)
    self.assertEqual(expected, cmd.get_ext_fullpath('twisted.runner.portmap'))

    # Same dotted package, inplace.
    cmd.inplace = 1
    expected = os.path.join(curdir, 'twisted', 'runner',
                            'portmap' + ext_suffix)
    self.assertEqual(expected, cmd.get_ext_fullpath('twisted.runner.portmap'))
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): """Compile a single source files with a Unix-style compiler.""" # HP ad-hoc fix, see ticket 1383 ccomp = self.compiler_so if ccomp[0] == 'aCC': # remove flags that will trigger ANSI-C mode for aCC if '-Ae' in ccomp: ccomp.remove('-Ae') if '-Aa' in ccomp: ccomp.remove('-Aa') # add flags for (almost) sane C++ handling ccomp += ['-AA'] self.compiler_so = ccomp # ensure OPT environment variable is read if 'OPT' in os.environ: from distutils.sysconfig import get_config_vars opt = " ".join(os.environ['OPT'].split()) gcv_opt = " ".join(get_config_vars('OPT')[0].split()) ccomp_s = " ".join(self.compiler_so) if opt not in ccomp_s: ccomp_s = ccomp_s.replace(gcv_opt, opt) self.compiler_so = ccomp_s.split() llink_s = " ".join(self.linker_so) if opt not in llink_s: self.linker_so = llink_s.split() + opt.split() display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) try: self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs, display = display) except DistutilsExecError: msg = str(get_exception()) raise CompileError(msg)
def setup_install():
    'Perform the equivalent of "python setup.py install".'
    # NOTE: Python 2 code -- relies on the legacy 'string' module
    # (string.split) available at module level.
    from distutils.core import setup, Extension
    from distutils.sysconfig import get_config_vars
    from ncptl_config import expanded_ncptl_config

    # Ensure we were passed the location of our input files.
    if len(sys.argv) < 3:
        abend('"install" requires the name of the top build directory')
    top_builddir = sys.argv[2]
    srcfile = os.path.abspath(os.path.join(top_builddir, "libncptl_wrap.c"))
    libdir = os.path.abspath(os.path.join(top_builddir, ".libs"))

    # Be sure to link with whatever libraries libncptl needs.
    linkargs = string.split(expanded_ncptl_config["LDFLAGS"] + " " +
                            expanded_ncptl_config["LIBS"])

    # Use Python's distutils to install everything in a "conceptual" directory.
    distobj = setup(name='conceptual-python',
                    version=expanded_ncptl_config["PACKAGE_VERSION"],
                    script_args=["install"] + sys.argv[3:],
                    package_dir={"conceptual": top_builddir, "": top_builddir},
                    py_modules=["pyncptl", "conceptual.__init__"],
                    ext_modules=[Extension("conceptual._pyncptl",
                                           sources=[srcfile],
                                           extra_compile_args=get_config_vars("CCSHARED") +
                                               string.split(expanded_ncptl_config["CPPFLAGS"]),
                                           extra_link_args=linkargs +
                                               string.split(expanded_ncptl_config["LDFLAGS"]),
                                           library_dirs=[libdir],
                                           libraries=["ncptl"])])
def build_extensions(self):
    """Append C++11 and warning-suppression flags to every extension, then build."""
    import platform
    from distutils import sysconfig
    if (hasattr(self.compiler, 'compiler') and
            len(self.compiler.compiler) > 0):
        cc_name = self.compiler.compiler[0]
        # Older gcc releases only understand the draft name for C++11.
        stdcpp = '-std=c++11'
        if 'gcc' in cc_name and not _check_gcc_cpp11(cc_name):
            stdcpp = '-std=c++0x'
        extra_flags = [
            stdcpp,
            '-Wno-deprecated-declarations',
            '-Wno-unused-local-typedefs',
            '-Wno-sign-compare',
            '-Wno-self-assign',
            '-Wno-macro-redefined',
            '-Wno-unused-const-variable',
        ]
        for ext in self.extensions:
            ext.extra_compile_args.extend(extra_flags)
        conf_vars = sysconfig.get_config_vars()
        if ('MACOSX_DEPLOYMENT_TARGET' in conf_vars and
                len(conf_vars['MACOSX_DEPLOYMENT_TARGET']) > 0):
            major, minor = conf_vars['MACOSX_DEPLOYMENT_TARGET'].split('.')
            if int(major) == 10 and int(minor) < 9:
                # Pre-10.9 deployment targets must request libc++ explicitly.
                for ext in self.extensions:
                    ext.extra_compile_args.append('--stdlib=libc++')
    build_ext.build_extensions(self)
def test_ext_fullpath(self):
    """get_ext_fullpath() must honor inplace, package_dir and build_lib.

    Uses assertEqual throughout: assertEquals is a deprecated alias and is
    removed in Python 3.12.
    """
    ext = sysconfig.get_config_vars()["SO"]
    dist = Distribution()
    cmd = build_ext(dist)
    cmd.inplace = 1
    cmd.distribution.package_dir = {"": "src"}
    cmd.distribution.packages = ["lxml", "lxml.html"]
    curdir = os.getcwd()
    wanted = os.path.join(curdir, "src", "lxml", "etree" + ext)
    path = cmd.get_ext_fullpath("lxml.etree")
    self.assertEqual(wanted, path)

    # building lxml.etree not inplace
    cmd.inplace = 0
    cmd.build_lib = os.path.join(curdir, "tmpdir")
    wanted = os.path.join(curdir, "tmpdir", "lxml", "etree" + ext)
    path = cmd.get_ext_fullpath("lxml.etree")
    self.assertEqual(wanted, path)

    # building twisted.runner.portmap not inplace
    build_py = cmd.get_finalized_command("build_py")
    build_py.package_dir = {}
    cmd.distribution.packages = ["twisted", "twisted.runner.portmap"]
    path = cmd.get_ext_fullpath("twisted.runner.portmap")
    wanted = os.path.join(curdir, "tmpdir", "twisted", "runner",
                          "portmap" + ext)
    self.assertEqual(wanted, path)

    # building twisted.runner.portmap inplace
    cmd.inplace = 1
    path = cmd.get_ext_fullpath("twisted.runner.portmap")
    wanted = os.path.join(curdir, "twisted", "runner", "portmap" + ext)
    self.assertEqual(wanted, path)
def my_init_posix():
    """Patched init_posix: compile/link with g++ and drop -O3 from PY_CFLAGS."""
    print('my_init_posix: changing gcc to g++')
    save_init_posix()
    config = sysconfig.get_config_vars()
    config['CC'] = 'g++'
    config['LDSHARED'] = 'g++ -shared'
    config['PY_CFLAGS'] = config['PY_CFLAGS'].replace('-O3', '')
def run(self):
    # Build the extensions, optionally bundling libffi sources and pointing
    # the toolchain at a chosen SDK / deployment target first.
    verify_platform()

    if not self.use_system_libffi:
        for ext in self.extensions:
            if ext.name == 'objc._objc':
                # NOTE(review): this slice looks suspicious -- it compares
                # sources[:-len(FFI_SOURCE)] (everything *except* the tail)
                # against FFI_SOURCE; an "already appended?" tail check would
                # be sources[-len(FFI_SOURCE):]. Verify intent before changing.
                if ext.sources[:-len(FFI_SOURCE)] != FFI_SOURCE:
                    ext.sources.extend(FFI_SOURCE)
                    ext.extra_compile_args.extend(FFI_CFLAGS)

    if self.deployment_target is not None:
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.deployment_target

    if self.sdk_root != 'python':
        # Point compiler and linker at the requested SDK (only once).
        if '-isysroot' not in CFLAGS:
            CFLAGS.extend(['-isysroot', self.sdk_root])
            EXT_CFLAGS.extend(['-isysroot', self.sdk_root])
            OBJC_LDFLAGS.extend(['-isysroot', self.sdk_root])

    cflags = get_config_var('CFLAGS')
    if '-mno-fused-madd' in cflags:
        # clang rejects -mno-fused-madd; scrub it from the inherited flags.
        cflags = cflags.replace('-mno-fused-madd', '')
        get_config_vars()['CFLAGS'] = cflags

    _fixup_compiler()
    build_ext.build_ext.run(self)

    # Copy only the PyObjCTest extensions back into the source tree,
    # then restore the full extension list.
    extensions = self.extensions
    self.extensions = [
        e for e in extensions if e.name.startswith('PyObjCTest')
    ]
    self.copy_extensions_to_source()
    self.extensions = extensions
def main():
    """Build/install the pacparser Python module (Makefile on Windows, distutils elsewhere)."""
    # Use Makefile for windows. distutils doesn't work well with windows.
    if sys.platform == 'win32':
        # Raw string: '\w', '\s' and '\p' are invalid escape sequences in a
        # normal string literal (DeprecationWarning, a future SyntaxError);
        # the raw form is byte-identical.
        pydll = (r'C:\windows\system32\python%s.dll' %
                 sysconfig.get_config_vars('VERSION')[0])
        os.system('make -f Makefile.win32 %s PY_HOME="%s" PY_DLL="%s"' %
                  (' '.join(sys.argv[1:]), sys.prefix, pydll))
        return
    pacparser_module = Extension('_pacparser',
                                 include_dirs=['../spidermonkey/js/src', '..'],
                                 sources=['pacparser_py.c'],
                                 extra_objects=['../pacparser.o', '../libjs.a'])
    setup(name='pacparser',
          version='1',
          description='Pacparser package',
          author='Manu Garg',
          author_email='*****@*****.**',
          url='http://code.google.com/p/pacparser',
          long_description='python library to parse proxy auto-config (PAC) '
                           'files.',
          license='LGPL',
          ext_package='pacparser',
          ext_modules=[pacparser_module],
          py_modules=['pacparser.__init__'])
def run(self):
    # Configure the bundled rabbitmq-c once (config.h acts as the stamp),
    # then run the normal build with our CFLAGS; the environment and cwd
    # are restored in all cases.
    here = os.path.abspath(os.getcwd())
    from distutils import sysconfig
    config = sysconfig.get_config_vars()
    try:
        restore = senv(("CFLAGS", config["CFLAGS"]),
                       ("LDFLAGS", config["LDFLAGS"]))
        try:
            os.chdir(LRMQDIST())
            if not os.path.isfile("config.h"):
                print("- configure rabbitmq-c...")
                os.system(CMD_CONFIGURE)
            # print('- make rabbitmq-c...')
            # os.chdir(LRMQSRC())
            # os.system(''%s' all' % find_make())
        finally:
            os.environ.update(restore)
    finally:
        os.chdir(here)
    restore = senv(
        # ('LDFLAGS', ' '.join(glob(LRMQSRC('*.o')))),
        ("CFLAGS", " ".join(self.stdcflags))
    )
    codegen()
    try:
        _build.run(self)
    finally:
        os.environ.update(restore)
def run(self):
    # Configure the bundled rabbitmq-c once (config.h acts as the stamp),
    # then run the normal build with our CFLAGS; the environment and cwd
    # are restored in all cases.
    here = os.path.abspath(os.getcwd())
    H = lambda *x: os.path.join(here, *x)
    from distutils import sysconfig
    config = sysconfig.get_config_vars()
    try:
        restore = senv(('CFLAGS', config['CFLAGS']),
                       ('LDFLAGS', config['LDFLAGS']))
        try:
            os.chdir(LRMQDIST())
            if not os.path.isfile('config.h'):
                print('- configure rabbitmq-c...')
                os.system('/bin/sh configure --disable-tools \
                    --disable-docs --disable-dependency-tracking')
            #print('- make rabbitmq-c...')
            #os.chdir(LRMQSRC())
            #os.system(''%s' all' % find_make())
        finally:
            os.environ.update(restore)
    finally:
        os.chdir(here)
    restore = senv(
        #('LDFLAGS', ' '.join(glob(LRMQSRC('*.o')))),
        ('CFLAGS', ' '.join(self.stdcflags)),
    )
    codegen()
    try:
        _build.run(self)
    finally:
        os.environ.update(restore)
def test_get_python_inc(self):
    # The check for srcdir is copied from Python's setup.py,
    # and is necessary to make this test pass when building
    # Python in a directory other than the source directory.
    (srcdir,) = sysconfig.get_config_vars('srcdir')
    if not srcdir:
        inc_dir = sysconfig.get_python_inc()
    else:
        # This test is not really a proper test: when building
        # Python from source, even in the same directory,
        # we won't be testing the same thing as when running
        # distutils' tests on an installed Python. Nevertheless,
        # let's try to do our best: if we are running Python's
        # unittests from a build directory that is not the source
        # directory, the normal inc_dir will exist, it will just not
        # contain anything of interest.
        inc_dir = sysconfig.get_python_inc()
        self.assert_(os.path.isdir(inc_dir))
        # Now test the source location, to make sure Python.h does
        # exist.
        inc_dir = os.path.join(os.getcwd(), srcdir, 'Include')
        inc_dir = os.path.normpath(inc_dir)
    self.assert_(os.path.isdir(inc_dir), inc_dir)
    python_h = os.path.join(inc_dir, "Python.h")
    self.assert_(os.path.isfile(python_h), python_h)
def run(self):
    # Configure the bundled rabbitmq-c once (config.h acts as the stamp),
    # then run the normal build with our CFLAGS; the environment and cwd
    # are restored in all cases.
    here = os.path.abspath(os.getcwd())
    H = lambda *x: os.path.join(here, *x)
    from distutils import sysconfig
    config = sysconfig.get_config_vars()
    try:
        restore = senv(("CFLAGS", config["CFLAGS"]),
                       ("LDFLAGS", config["LDFLAGS"]))
        try:
            os.chdir(LRMQDIST())
            if not os.path.isfile("config.h"):
                print("- configure rabbitmq-c...")
                os.system("/bin/sh configure --disable-dependency-tracking")
            #print("- make rabbitmq-c...")
            #os.chdir(LRMQSRC())
            #os.system('"%s" all' % find_make())
        finally:
            os.environ.update(restore)
    finally:
        os.chdir(here)
    restore = senv(
        #("LDFLAGS", ' '.join(glob(LRMQSRC("*.o")))),
        ("CFLAGS", ' '.join(self.stdcflags)),
    )
    codegen()
    try:
        _build.run(self)
    finally:
        os.environ.update(restore)
readme_content = "" finally: f.close() PY3 = sys.version_info[0] == 3 # PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple # ships are built with it. This is a problem starting with Xcode 5.1 # since clang 3.4 errors out when it encounters unrecognized compiler # flags. This hack removes -mno-fused-madd from the CFLAGS automatically # generated by distutils for Apple provided pythons, allowing C extension # builds to complete without error. The inspiration comes from older # versions of distutils.sysconfig.get_config_vars. if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower(): from distutils.sysconfig import get_config_vars res = get_config_vars() for key in ('CFLAGS', 'PY_CFLAGS'): if key in res: flags = res[key] flags = re.sub('-mno-fused-madd', '', flags) res[key] = flags nose_config_options = { 'with-xunit': '1', # Write out nosetests.xml for CI. 'py3where': 'build', # Tell nose where to find tests under PY3. } def write_nose_config(): """Write out setup.cfg. Since py3where has to be set for tests to run correctly in Python 3 we create this on the fly.
def run(self):
    # Build a static, PIC libsodium from the bundled sources into
    # self.build_clib (skipped entirely when the system library is used).
    if use_system():
        return

    # use Python's build environment variables (but never override ones
    # the user already set in the environment)
    build_env = {key: val for key, val in get_config_vars().items()
                 if key in ("LDFLAGS", "CFLAGS", "CC", "CCSHARED", "LDSHARED")
                 and key not in os.environ}
    os.environ.update(build_env)

    # Ensure our temporary build directory exists
    build_temp = os.path.abspath(self.build_temp)
    try:
        os.makedirs(build_temp)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    # Ensure all of our executable files have their permission set
    for filename in [
            "src/libsodium/autogen.sh",
            "src/libsodium/compile",
            "src/libsodium/configure",
            "src/libsodium/depcomp",
            "src/libsodium/install-sh",
            "src/libsodium/missing",
            "src/libsodium/msvc-scripts/process.bat",
            "src/libsodium/test/default/wintest.bat"]:
        os.chmod(here(filename), 0o755)

    if not which("make"):
        raise Exception("ERROR: The 'make' utility is missing from PATH")

    # Locate our configure script
    configure = abshere("src/libsodium/configure")

    # Run ./configure
    configure_flags = ["--disable-shared", "--enable-static",
                       "--disable-debug", "--disable-dependency-tracking",
                       "--with-pic"]
    if platform.system() == "SunOS":
        # On Solaris, libssp doesn't link statically and causes linker
        # errors during import
        configure_flags.append("--disable-ssp")
    if os.environ.get('SODIUM_INSTALL_MINIMAL'):
        configure_flags.append("--enable-minimal")
    subprocess.check_call(
        [configure] + configure_flags +
        ["--prefix", os.path.abspath(self.build_clib)],
        cwd=build_temp,
    )

    make_args = os.environ.get('LIBSODIUM_MAKE_ARGS', '').split()
    # Build the library
    subprocess.check_call(["make"] + make_args, cwd=build_temp)
    # Check the build library
    subprocess.check_call(["make", "check"] + make_args, cwd=build_temp)
    # Install the built library
    subprocess.check_call(["make", "install"] + make_args, cwd=build_temp)
# # Adjust distutils CFLAGS: # # - PyObjC won't work when compiled with -O0 # - To make it easier to debug reduce optimization level # to -O1 when building with a --with-pydebug build of Python # - Set optimization to -O4 with normal builds of Python, # enables link-time optimization with clang and appears to # be (slightly) faster. # if "-O0" in get_config_var("CFLAGS"): # -O0 doesn't work with some (older?) compilers, unconditionally # change -O0 to -O1 to work around that issue. print("Change -O0 to -O1 (-O0 miscompiles libffi)") config_vars = get_config_vars() for k in config_vars: if isinstance(config_vars[k], str) and "-O0" in config_vars[k]: config_vars[k] = config_vars[k].replace("-O0", "-O1") if get_config_var("Py_DEBUG"): # Running with Py_DEBUG, reduce optimization level # to make it easier to debug the code. cfg_vars = get_config_vars() for k in cfg_vars: if isinstance(cfg_vars[k], str) and "-O2" in cfg_vars[k]: cfg_vars[k] = cfg_vars[k].replace("-O2", "-O1 -g") elif isinstance(cfg_vars[k], str) and "-O3" in cfg_vars[k]: cfg_vars[k] = cfg_vars[k].replace("-O3", "-O1 -g") else:
def test_SO_in_vars(self):
    """The legacy 'SO' config var must exist and mirror 'EXT_SUFFIX'."""
    config = sysconfig.get_config_vars()
    self.assertIsNotNone(config['SO'])
    self.assertEqual(config['SO'], config['EXT_SUFFIX'])
def configure_step(self):
    """Configure Python package build/install."""
    if self.python_cmd is None:
        self.prepare_python()

    if self.sitecfg is not None:
        # used by some extensions, like numpy, to find certain libs
        finaltxt = self.sitecfg
        if self.sitecfglibdir:
            repl = self.sitecfglibdir
            finaltxt = finaltxt.replace('SITECFGLIBDIR', repl)
        if self.sitecfgincdir:
            repl = self.sitecfgincdir
            finaltxt = finaltxt.replace('SITECFGINCDIR', repl)
        self.log.debug("Using %s: %s" % (self.sitecfgfn, finaltxt))
        try:
            # log any pre-existing site config file before overwriting it
            if os.path.exists(self.sitecfgfn):
                txt = open(self.sitecfgfn).read()
                self.log.debug("Found %s: %s" % (self.sitecfgfn, txt))
            config = open(self.sitecfgfn, 'w')
            config.write(finaltxt)
            config.close()
        except IOError:
            raise EasyBuildError("Creating %s failed", self.sitecfgfn)

    # ensure that LDSHARED uses CC
    if self.cfg.get('check_ldshared', False):
        curr_cc = os.getenv('CC')
        python_ldshared = get_config_vars('LDSHARED')[0]
        if python_ldshared and curr_cc:
            if python_ldshared.split(' ')[0] == curr_cc:
                self.log.info(
                    "Python's value for $LDSHARED ('%s') uses current $CC value ('%s'), not touching it",
                    python_ldshared, curr_cc)
            else:
                self.log.info(
                    "Python's value for $LDSHARED ('%s') doesn't use current $CC value ('%s'), fixing",
                    python_ldshared, curr_cc)
                env.setvar("LDSHARED", curr_cc + " -shared")
        else:
            if curr_cc:
                self.log.info(
                    "No $LDSHARED found for Python, setting to '%s -shared'", curr_cc)
                env.setvar("LDSHARED", curr_cc + " -shared")
            else:
                self.log.info(
                    "No value set for $CC, so not touching $LDSHARED either"
                )

    # creates log entries for python being used, for debugging
    run_cmd("%s -V" % self.python_cmd, verbose=False, trace=False)
    run_cmd("%s -c 'import sys; print(sys.executable)'" % self.python_cmd,
            verbose=False, trace=False)

    # don't add user site directory to sys.path (equivalent to python -s)
    # see https://www.python.org/dev/peps/pep-0370/
    env.setvar('PYTHONNOUSERSITE', '1', verbose=False)
    run_cmd("%s -c 'import sys; print(sys.path)'" % self.python_cmd,
            verbose=False,
            trace=False)
def finalize_options(self):
    """Finalizes options."""
    # This method (and its helpers, like 'finalize_unix()',
    # 'finalize_other()', and 'select_scheme()') is where the default
    # installation directories for modules, extension modules, and
    # anything else we care to install from a Python module
    # distribution. Thus, this code makes a pretty important policy
    # statement about how third-party stuff is added to a Python
    # installation! Note that the actual work of installation is done
    # by the relatively simple 'install_*' commands; they just take
    # their orders from the installation directory options determined
    # here.

    # Check for errors/inconsistencies in the options; first, stuff
    # that's wrong on any platform.
    if ((self.prefix or self.exec_prefix or self.home) and
            (self.install_base or self.install_platbase)):
        raise DistutilsOptionError(
            "must supply either prefix/exec-prefix/home or " +
            "install-base/install-platbase -- not both")

    if self.home and (self.prefix or self.exec_prefix):
        raise DistutilsOptionError(
            "must supply either home or prefix/exec-prefix -- not both")

    if self.user and (self.prefix or self.exec_prefix or self.home or
                      self.install_base or self.install_platbase):
        raise DistutilsOptionError("can't combine user with prefix, "
                                   "exec_prefix/home, or install_(plat)base")

    # Next, stuff that's wrong (or dubious) only on certain platforms.
    if os.name != "posix":
        if self.exec_prefix:
            self.warn("exec-prefix option ignored on this platform")
            self.exec_prefix = None

    # Now the interesting logic -- so interesting that we farm it out
    # to other methods. The goal of these methods is to set the final
    # values for the install_{lib,scripts,data,...} options, using as
    # input a heady brew of prefix, exec_prefix, home, install_base,
    # install_platbase, user-supplied versions of
    # install_{purelib,platlib,lib,scripts,data,...}, and the
    # INSTALL_SCHEME dictionary above. Phew!

    self.dump_dirs("pre-finalize_{unix,other}")

    if os.name == 'posix':
        self.finalize_unix()
    else:
        self.finalize_other()

    self.dump_dirs("post-finalize_{unix,other}()")

    # Expand configuration variables, tilde, etc. in self.install_base
    # and self.install_platbase -- that way, we can use $base or
    # $platbase in the other installation directories and not worry
    # about needing recursive variable expansion (shudder).
    py_version = sys.version.split()[0]
    (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
    try:
        abiflags = sys.abiflags
    except AttributeError:
        # sys.abiflags may not be defined on all platforms.
        abiflags = ''
    self.config_vars = {
        # BUGFIX: call get_name() -- the original stored the bound method
        # itself, unlike every other entry, which would leak
        # '<bound method ...>' into expanded directory templates.
        'dist_name': self.distribution.get_name(),
        'dist_version': self.distribution.get_version(),
        'dist_fullname': self.distribution.get_fullname(),
        'py_version': py_version,
        'py_version_short': '%d.%d' % sys.version_info[:2],
        'py_version_nodot': '%d%d' % sys.version_info[:2],
        'sys_prefix': prefix,
        'prefix': prefix,
        'sys_exec_prefix': exec_prefix,
        'exec_prefix': exec_prefix,
        'abiflags': abiflags,
        'platlibdir': getattr(sys, 'platlibdir', 'lib'),
    }

    if HAS_USER_SITE:
        self.config_vars['userbase'] = self.install_userbase
        self.config_vars['usersite'] = self.install_usersite

    self.expand_basedirs()

    self.dump_dirs("post-expand_basedirs()")

    # Now define config vars for the base directories so we can expand
    # everything else.
    self.config_vars['base'] = self.install_base
    self.config_vars['platbase'] = self.install_platbase

    if DEBUG:
        from pprint import pprint
        print("config vars:")
        pprint(self.config_vars)

    # Expand "~" and configuration variables in the installation
    # directories.
    self.expand_dirs()

    self.dump_dirs("post-expand_dirs()")

    # Create directories in the home dir:
    if self.user:
        self.create_home_path()

    # Pick the actual directory to install all modules to: either
    # install_purelib or install_platlib, depending on whether this
    # module distribution is pure or not. Of course, if the user
    # already specified install_lib, use their selection.
    if self.install_lib is None:
        if self.distribution.ext_modules:  # has extensions: non-pure
            self.install_lib = self.install_platlib
        else:
            self.install_lib = self.install_purelib

    # Convert directories from Unix /-separated syntax to the local
    # convention.
    self.convert_paths('lib', 'purelib', 'platlib', 'scripts',
                       'data', 'headers', 'userbase', 'usersite')  # Deprecated

    # Well, we're not actually fully completely finalized yet: we still
    # have to deal with 'extra_path', which is the hack for allowing
    # non-packagized module distributions (hello, Numerical Python!) to
    # get their own directories.
    self.handle_extra_path()
    self.install_libbase = self.install_lib  # needed for .pth file
    self.install_lib = os.path.join(self.install_lib, self.extra_dirs)

    # If a new root directory was supplied, make all the installation
    # dirs relative to it.
    if self.root is not None:
        self.change_roots('libbase', 'lib', 'purelib', 'platlib',
                          'scripts', 'data', 'headers')

    self.dump_dirs("after prepending root")

    # Find out the build directories, ie. where to install from.
    self.set_undefined_options('build',
                               ('build_base', 'build_base'),
                               ('build_lib', 'build_lib'))
import sys
import os
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from numpy.distutils.system_info import default_include_dirs, default_lib_dirs
from distutils.sysconfig import get_config_vars

# On Windows, arb/flint need the MPIR/MPFR/pthreads import libraries as
# explicit link inputs; elsewhere the shared arb/flint libraries pull in
# their own dependencies.
if sys.platform == 'win32':
    libraries = ["arb", "flint", "mpir", "mpfr", "pthreads"]
else:
    libraries = ["arb", "flint"]

# Strip -Wstrict-prototypes from the compile options: it is meaningless
# for the C sources Cython generates and only produces warning noise.
# NOTE: get_config_vars('OPT') can legitimately yield None (e.g. on
# Windows builds of CPython), so guard before splitting — the original
# code crashed with TypeError in that case.
(opt, ) = get_config_vars('OPT')
os.environ['OPT'] = " ".join(
    flag for flag in (opt or "").split() if flag != '-Wstrict-prototypes'
)

# flint installs its headers under a "flint" subdirectory of each include
# directory, so mirror every include dir with its "flint" child.
default_include_dirs += [
    os.path.join(d, "flint") for d in default_include_dirs
]

ext_modules = [
    Extension("flint._flint", ["src/flint/pyflint.pyx"],
              libraries=libraries,
              library_dirs=default_lib_dirs,
              include_dirs=default_include_dirs)
]

# Embed call signatures into docstrings so the compiled extension stays
# introspectable (help()/IDE support).
for e in ext_modules:
    e.cython_directives = {"embedsignature": True}
# No information available print "none" sys.exit(1) if sys.argv[1] == '--includes': inc = sysconfig.get_python_inc() plat = sysconfig.get_python_inc(plat_specific=1) if inc == plat: print "-I" + inc else: print "-I%s -I%s" % (inc, plat) sys.exit(0) if sys.argv[1] == '--compile': cc, basecflags, opt, ccshared = \ sysconfig.get_config_vars('CC', 'BASECFLAGS', 'OPT', 'CCSHARED') if basecflags: opt = basecflags + ' ' + opt print cc, opt, ccshared sys.exit(0) def add_option(options, name, value=None): """Add option to list of options""" options.append(name) if value is not None: options.append(value) def add_option_if_missing(options, name, value=None): """Add option to list of options, if it is not already present"""
class CompilerDetection(object):
    """Probe the active C compiler for OpenMP and SSE3/SSE4.1 support and
    expose the matching compile/link argument lists as attributes."""

    # Necessary for OSX. See https://github.com/SimTk/mdtraj/issues/576
    # The problem is that distutils.sysconfig.customize_compiler()
    # is necessary to properly invoke the correct compiler for this class
    # (otherwise the CC env variable isn't respected). Unfortunately,
    # distutils.sysconfig.customize_compiler() DIES on OSX unless some
    # appropriate initialization routines have been called. This line
    # has a side effect of calling those initialzation routes, and is therefor
    # necessary for OSX, even though we don't use the result.
    _DONT_REMOVE_ME = get_config_vars()

    def __init__(self, disable_openmp):
        # Build a throwaway compiler object only to learn its type and
        # run the feature probes against it.
        cc = new_compiler()
        customize_compiler(cc)
        self.msvc = cc.compiler_type == 'msvc'
        self._print_compiler_version(cc)
        if disable_openmp:
            self.openmp_enabled = False
        else:
            self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
        # MSVC is assumed to support SSE3/SSE4.1 unconditionally; only
        # non-MSVC compilers are actually probed.
        self.sse3_enabled = self._detect_sse3() if not self.msvc else True
        self.sse41_enabled = self._detect_sse41() if not self.msvc else True

        self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
        self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []

        self.compiler_args_sse41, self.define_macros_sse41 = [], []
        if self.sse41_enabled:
            # MSVC gets the macros only; GCC-likes also get the -msse4 flag.
            self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
            if not self.msvc:
                self.compiler_args_sse41 = ['-msse4']

        if self.openmp_enabled:
            # Older GCCs need an explicit -lgomp; _detect_openmp tells us.
            self.compiler_libraries_openmp = ['gomp'] if openmp_needs_gomp else []
            if self.msvc:
                self.compiler_args_openmp = ['/openmp']
            else:
                self.compiler_args_openmp = ['-fopenmp']
        else:
            self.compiler_libraries_openmp = []
            self.compiler_args_openmp = []

        if self.msvc:
            self.compiler_args_opt = ['/O2']
        else:
            self.compiler_args_opt = ['-O3', '-funroll-loops']
        print()

    def _print_compiler_version(self, cc):
        """Print the detected compiler's version banner (via spawning it)."""
        print("C compiler:")
        if self.msvc:
            # MSVC must be initialized before cc.cc is available.
            if not cc.initialized:
                cc.initialize()
            cc.spawn([cc.cc])
        else:
            cc.spawn([cc.compiler[0]] + ['-v'])

    def hasfunction(self, cc, funcname, include=None, extra_postargs=None):
        """Return True if a tiny C program calling *funcname* compiles and
        links with *cc*; stderr is silenced at the fd level meanwhile."""
        # From http://stackoverflow.com/questions/
        # 7018879/disabling-output-when-compiling-with-distutils
        tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
        devnull = oldstderr = None
        try:
            try:
                fname = os.path.join(tmpdir, 'funcname.c')
                f = open(fname, 'w')
                if include is not None:
                    f.write('#include %s\n' % include)
                f.write('int main(void) {\n')
                f.write(' %s;\n' % funcname)
                f.write('}\n')
                f.close()
                # Redirect the process-level stderr fd so the compiler's
                # error spew (expected on failed probes) stays hidden.
                devnull = open(os.devnull, 'w')
                oldstderr = os.dup(sys.stderr.fileno())
                os.dup2(devnull.fileno(), sys.stderr.fileno())
                objects = cc.compile([fname], output_dir=tmpdir,
                                     extra_postargs=extra_postargs)
                cc.link_executable(objects, os.path.join(tmpdir, 'a.out'))
            except Exception as e:
                # Any compile/link failure simply means "not supported".
                return False
            return True
        finally:
            # Always restore stderr and clean up the scratch directory.
            if oldstderr is not None:
                os.dup2(oldstderr, sys.stderr.fileno())
            if devnull is not None:
                devnull.close()
            shutil.rmtree(tmpdir)

    def _print_support_start(self, feature):
        """Print the 'probing X...' prefix (no newline; result follows)."""
        print('Attempting to autodetect {0:6} support...'.format(feature),
              end=' ')

    def _print_support_end(self, feature, status):
        """Print the outcome line matching _print_support_start."""
        if status is True:
            print('Compiler supports {0}'.format(feature))
        else:
            print('Did not detect {0} support'.format(feature))

    def _detect_openmp(self):
        """Probe OpenMP support; returns (has_openmp, needs_libgomp)."""
        self._print_support_start('OpenMP')
        compiler = new_compiler()
        customize_compiler(compiler)
        # Pass both the GCC and MSVC spellings; unknown flags make the
        # probe fail, which is handled as "retry with libgomp" below.
        hasopenmp = self.hasfunction(compiler, 'omp_get_num_threads()',
                                     extra_postargs=['-fopenmp', '/openmp'])
        needs_gomp = hasopenmp
        if not hasopenmp:
            compiler.add_library('gomp')
            hasopenmp = self.hasfunction(compiler, 'omp_get_num_threads()')
            needs_gomp = hasopenmp
        self._print_support_end('OpenMP', hasopenmp)
        return hasopenmp, needs_gomp

    def _detect_sse3(self):
        "Does this compiler support SSE3 intrinsics?"
        compiler = new_compiler()
        customize_compiler(compiler)
        self._print_support_start('SSE3')
        result = self.hasfunction(compiler, '__m128 v; _mm_hadd_ps(v,v)',
                                  include='<pmmintrin.h>',
                                  extra_postargs=['-msse3'])
        self._print_support_end('SSE3', result)
        return result

    def _detect_sse41(self):
        "Does this compiler support SSE4.1 intrinsics?"
        compiler = new_compiler()
        customize_compiler(compiler)
        self._print_support_start('SSE4.1')
        result = self.hasfunction(compiler, '__m128 v; _mm_round_ps(v,0x00)',
                                  include='<smmintrin.h>',
                                  extra_postargs=['-msse4'])
        self._print_support_end('SSE4.1', result)
        return result
import numpy as np
import os
import platform
import warnings

# Import shared libgse2
# create library names
lib_names = [
    # platform specific library name
    'libgse2-%s-%s-py%s' % (platform.system(), platform.architecture()[0],
                            ''.join([str(i) for i in
                                     platform.python_version_tuple()[:2]])),
    # fallback for pre-packaged libraries
    'libgse2']

# get default file extension for shared objects
# NOTE(review): 'SO' was the pre-3.4 config-var name; newer Pythons use
# 'EXT_SUFFIX' — this module is Python 2 code (see "except Exception, e").
lib_extension, = sysconfig.get_config_vars('SO')

# initialize library
# Try the platform-specific name first, then the generic fallback.
# NOTE(review): "C" is presumably ctypes imported elsewhere as
# "import ctypes as C" — confirm against the module header.
clibgse2 = None
for lib_name in lib_names:
    try:
        clibgse2 = C.CDLL(os.path.join(os.path.dirname(__file__), 'lib',
                                       lib_name + lib_extension))
    except Exception, e:
        pass
    else:
        break

if not clibgse2:
    # NOTE(review): relies on "e" leaking out of the loop's except clause;
    # if lib_names were ever empty this would raise NameError instead.
    msg = 'Could not load shared library for obspy.gse2.\n\n %s' % (e)
    raise ImportError(msg)
extra_link_args.append("/DEBUG") else: # args to ignore warnings extra_compile_args = [] extra_link_args = [] if debugging_symbols_requested: extra_compile_args.append("-g") # Build for at least macOS 10.9 when compiling on a 10.9 system or above, # overriding CPython distuitls behaviour which is to target the version that # python was built for. This may be overridden by setting # MACOSX_DEPLOYMENT_TARGET before calling setup.py if is_platform_mac(): if "MACOSX_DEPLOYMENT_TARGET" not in os.environ: current_system = platform.mac_ver()[0] python_target = get_config_vars().get("MACOSX_DEPLOYMENT_TARGET", current_system) if (LooseVersion(python_target) < "10.9" and LooseVersion(current_system) >= "10.9"): os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.9" # enable coverage by building cython files by setting the environment variable # "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext # with `--with-cython-coverage`enabled linetrace = os.environ.get("PANDAS_CYTHON_COVERAGE", False) if "--with-cython-coverage" in sys.argv: linetrace = True sys.argv.remove("--with-cython-coverage") # Note: if not using `cythonize`, coverage can be enabled by # pinning `ext.cython_directives = directives` to each ext in extensions. # github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
sopt = '-%s' % sopt if lopt in sys.argv: sys.argv.remove(lopt) elif sopt in sys.argv: sys.argv.remove(sopt) sys.argv.remove(specsrc) except: pass print("Compiling datashm python module - Using sps.c source from", specsrc) if platform.system() == 'Darwin': # ALL default compiler / linker options come from _sysconfigdata from distutils.sysconfig import get_config_vars get_config_vars( "CFLAGS") # need to call get_config_vars once to populate _config_vars from distutils.sysconfig import _config_vars # 'CCSHARED': '-arch x86_64 -arch i386 -pipe', _config_vars['CCSHARED'] = '-arch x86_64 -pipe' # 'LDSHARED': 'cc -bundle -undefined dynamic_lookup -arch x86_64 -arch i386 -Wl,-F.', _config_vars[ 'LDSHARED'] = '/usr/bin/clang -bundle -undefined dynamic_lookup -arch x86_64 -Wl,-F.' #_config_vars.build_time_vars['OPT']='-DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes' _config_vars['OPT'] = '-DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes' from os import environ environ['CFLAGS'] = '-Qunused-arguments' # appended to CFLAGS or CCSHARED #environ['BASECFLAGS'] = '' #environ['OPT'] = ''
def compile_c_module(cfiles, modbasename, eci, tmpdir=None):
    """Compile *cfiles* into a shared module named after *modbasename*
    inside *tmpdir* (default: the configdir module cache) and return the
    resulting file path.  *eci* supplies extra sources, include dirs,
    library dirs and libraries.  Python 2 code."""
    #try:
    #    from distutils.log import set_threshold
    #    set_threshold(10000)
    #except ImportError:
    #    print "ERROR IMPORTING"
    #    pass
    cfiles = [py.path.local(f) for f in cfiles]
    if tmpdir is None:
        tmpdir = configdir.join("module_cache").ensure(dir=1)
    num = 0
    cfiles += eci.separate_module_files
    include_dirs = list(eci.include_dirs)
    library_dirs = list(eci.library_dirs)
    if sys.platform == 'darwin':
        # support Fink & Darwinports: add their prefixes when present.
        for s in ('/sw/', '/opt/local/'):
            if s + 'include' not in include_dirs and \
               os.path.exists(s + 'include'):
                include_dirs.append(s + 'include')
            if s + 'lib' not in library_dirs and \
               os.path.exists(s + 'lib'):
                library_dirs.append(s + 'lib')
    # Pick a module name that does not collide with an existing build
    # artifact in the cache (modbasename, modbasename_1, ...).
    num = 0
    modname = modbasename
    while 1:
        if not tmpdir.join(modname + so_ext).check():
            break
        num += 1
        modname = '%s_%d' % (modbasename, num)
    # Build inside tmpdir; lastdir lets us chdir back in the finally.
    lastdir = tmpdir.chdir()
    libraries = eci.libraries
    ensure_correct_math()
    try:
        if debug: print "modname", modname
        # Capture compiler stdout/stderr; dumped to <modname>.errors below.
        c = stdoutcapture.Capture(mixed_out_err=True)
        try:
            try:
                if compiler_command():
                    # GCC-ish options only
                    # A user-supplied compiler command template: '%s' is
                    # replaced by the output path, -I/-L flags appended.
                    from distutils import sysconfig
                    gcv = sysconfig.get_config_vars()
                    cmd = compiler_command().replace('%s',
                                                     str(tmpdir.join(modname)))
                    for dir in [gcv['INCLUDEPY']] + list(include_dirs):
                        cmd += ' -I%s' % dir
                    for dir in library_dirs:
                        cmd += ' -L%s' % dir
                    os.system(cmd)
                else:
                    from distutils.dist import Distribution
                    from distutils.extension import Extension
                    from distutils.ccompiler import get_default_compiler
                    # distutils commands may mutate os.environ; snapshot it
                    # so the finally block can restore changed keys.
                    saved_environ = os.environ.items()
                    try:
                        # distutils.core.setup() is really meant for end-user
                        # interactive usage, because it eats most exceptions and
                        # turn them into SystemExits.  Instead, we directly
                        # instantiate a Distribution, which also allows us to
                        # ignore unwanted features like config files.
                        extra_compile_args = []
                        # ensure correct math on windows
                        if sys.platform == 'win32':
                            extra_compile_args.append('/Op')  # get extra precision
                        if get_default_compiler() == 'unix':
                            old_version = False
                            try:
                                g = os.popen('gcc --version', 'r')
                                verinfo = g.read()
                                g.close()
                            except (OSError, IOError):
                                pass
                            else:
                                old_version = verinfo.startswith('2')
                            if not old_version:
                                # gcc >= 3 understands these warning switches.
                                extra_compile_args.extend(
                                    ["-Wno-unused-label",
                                     "-Wno-unused-variable"])
                        attrs = {
                            'name': "testmodule",
                            'ext_modules': [
                                Extension(
                                    modname,
                                    [str(cfile) for cfile in cfiles],
                                    include_dirs=include_dirs,
                                    library_dirs=library_dirs,
                                    extra_compile_args=extra_compile_args,
                                    libraries=list(libraries),
                                )
                            ],
                            'script_name': 'setup.py',
                            'script_args': ['-q', 'build_ext',
                                            '--inplace', '--force'],
                        }
                        dist = Distribution(attrs)
                        if not dist.parse_command_line():
                            raise ValueError, "distutils cmdline parse error"
                        dist.run_commands()
                    finally:
                        # Restore any environment keys distutils changed.
                        for key, value in saved_environ:
                            if os.environ.get(key) != value:
                                os.environ[key] = value
            finally:
                # NOTE(review): the double target "foutput, foutput" keeps
                # only the second stream from Capture.done() — presumably
                # (fout, ferr) originally; confirm against stdoutcapture.
                foutput, foutput = c.done()
                data = foutput.read()
                if data:
                    fdump = open("%s.errors" % modname, "w")
                    fdump.write(data)
                    fdump.close()
            # XXX do we need to do some check on fout/ferr?
            # XXX not a nice way to import a module
        except:
            print >> sys.stderr, data
            raise
    finally:
        lastdir.chdir()
    return str(tmpdir.join(modname) + so_ext)
def _fixup_compiler(use_ccache):
    """Pick a working C compiler on macOS and patch it into the sysconfig
    config vars (BLDSHARED/LDSHARED/CC/CXX).  Rejects llvm-gcc and broken
    default compilers, falling back to clang; optionally prefixes the
    command with ccache.  Raises DistutilsPlatformError when no working
    compiler (or an unsupported i386/ppc CFLAGS arch) is found."""
    if "CC" in os.environ:
        # CC is in the environment, always use explicit
        # overrides.
        return

    try:
        # Newer version of python have support for dealing with
        # the compiler mess w.r.t. various versions of Apple's SDKs
        import _osx_support
        _osx_support.customize_compiler(get_config_vars())
    except (ImportError, AttributeError, KeyError):
        pass

    # First word of CC is the compiler executable; resolve it on PATH.
    cc = oldcc = get_config_var("CC").split()[0]
    cc = _find_executable(cc)
    if cc is not None and os.path.basename(cc).startswith("gcc"):
        # Check if compiler is LLVM-GCC, that's known to
        # generate bad code.
        with os.popen("'%s' --version 2>/dev/null" % (
                cc.replace("'", "'\"'\"'"),)) as fp:
            data = fp.read()
        if "llvm-gcc" in data:
            cc = None

    if cc is not None and not _working_compiler(cc):
        cc = None

    if cc is None:
        # Default compiler is not useable, try finding 'clang'
        cc = _find_executable("clang")
        if cc is None:
            # Last resort: ask Xcode where clang lives.
            cc = os.popen("/usr/bin/xcrun -find clang").read()

    if not cc:
        raise DistutilsPlatformError("Cannot locate compiler candidate")

    if not _working_compiler(cc):
        raise DistutilsPlatformError("Cannot locate a working compiler")

    if use_ccache:
        p = _find_executable("ccache")
        if p is not None:
            log.info("Detected and using 'ccache'")
            cc = "%s %s" % (p, cc)

    if cc != oldcc:
        log.info("Use '%s' instead of '%s' as the compiler" % (cc, oldcc))

        # Rewrite only vars not already overridden via the environment;
        # CXX gets the C compiler name with "++" appended.
        config_vars = get_config_vars()
        for env in ("BLDSHARED", "LDSHARED", "CC", "CXX"):
            if env in config_vars and env not in os.environ:
                split = config_vars[env].split()
                split[0] = cc if env != "CXX" else cc + "++"
                config_vars[env] = " ".join(split)

    # 32-bit and PowerPC builds are explicitly unsupported by PyObjC.
    cflags = get_config_var("CFLAGS")
    if re.search(r"-arch\s+i386", cflags) is not None:
        raise DistutilsPlatformError(
            "i386 (32-bit) is not supported by PyObjC")
    if re.search(r"-arch\s+ppc", cflags) is not None:
        raise DistutilsPlatformError("PowerPC is not supported by PyObjC")
def get_platform():
    """Return a string that identifies the current platform.  This is used
    mainly to distinguish platform-specific build directories and
    platform-specific built distributions.  Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    For non-POSIX platforms, currently just returns 'sys.platform'.
    """
    # NOTE(review): Python 2 code — uses the removed string.lower/replace
    # module functions.
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix

    (osname, host, release, version, machine) = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = string.lower(osname)
    osname = string.replace(osname, '/', '')
    machine = string.replace(machine, ' ', '_')
    machine = string.replace(machine, '/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":  # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":  # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Cygwin releases look like "1.5.24(0.156/4/2)"; keep only the
        # leading dotted-number part.
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        from distutils.sysconfig import get_config_vars
        cfgvars = get_config_vars()

        macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
        if not macver:
            macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        if not macver:
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                m = re.search(
                    r'<key>ProductUserVisibleVersion</key>\s*' +
                    r'<string>(.*?)</string>', f.read())
                f.close()
                if m is not None:
                    # Keep only major.minor, e.g. "10.4".
                    macver = '.'.join(m.group(1).split('.')[:2])
                # else: fall back to the default behaviour

        if macver:
            from distutils.sysconfig import get_config_vars
            release = macver
            osname = 'macosx'

            platver = os.uname()[2]
            osmajor = int(platver.split('.')[0])
            if osmajor >= 8 and \
               get_config_vars().get('UNIVERSALSDK', '').strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                machine = 'fat'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture
                machine = 'ppc'

    return "%s-%s-%s" % (osname, release, machine)
def test_get_config_vars(self): cvars = sysconfig.get_config_vars() self.assertIsInstance(cvars, dict) self.assertTrue(cvars)
def run(self):
    """Build the bundled HDF5/HDS/NDF C libraries with a raw distutils
    CCompiler, link the Python extensions against them, and copy the
    shared libraries into the package's '.libs' directory so the wheel
    is relocatable (via $ORIGIN / @loader_path rpaths)."""
    # Get the names of library files Name of shared object
    # library, and ensure -bundle isn't called in linking on osx.
    libtype = 'shared'
    rdirs = ['$ORIGIN']
    ldirs = []
    ext_runtime_library_dirs = '$ORIGIN/{}'
    ext_extra_link_args = None
    osx = None
    if 'osx' in self.plat_name or 'darwin' in self.plat_name:
        osx = True
        print('\n\nBuilding under OSX!\n\n\n')
        libtype = 'dylib'
        from distutils import sysconfig
        vars = sysconfig.get_config_vars()
        # LDSHARED defaults to "-bundle" on macOS, which cannot produce a
        # linkable dylib; switch it to -dynamiclib.
        vars['LDSHARED'] = vars['LDSHARED'].replace(
            '-bundle', '-dynamiclib')
        rdirs = []
        ldirs = ['-Wl,-rpath,'+'@loader_path/']
        # NOTE(review): the line above is immediately overwritten — dead
        # assignment; confirm whether the rpath ldirs were meant to stay.
        ldirs = []
        ext_runtime_library_dirs = None
        ext_extra_link_args = '-Wl,-rpath,@loader_path/{}'
        install_name_pattern = '-Wl,-install_name,@rpath/{}'

    # Ensure the directories and files are in appropriate locations.
    setup_building()

    # Before we can build the extensions, we have to run ./configure
    # and make for HDF5.
    basedir = os.getcwd()
    os.chdir('hdf5')
    env = os.environ
    if not FAKEBUILDING:
        subprocess.check_call('./configure', env=env)
        subprocess.check_call('make', env=env)
    os.chdir(basedir)

    # Now we need to get the header files we need copied into our
    # output directory.
    if not os.path.isdir('include'):
        os.mkdir('include')
    if not os.path.isdir(os.path.join('include', 'star')):
        os.mkdir(os.path.join('include', 'star'))

    # Copy hdf5 needed header files over.
    shutil.copy(os.path.join('hdf5', 'src', 'hdf5.h'), 'include')
    shutil.copy(os.path.join('hdf5', 'src', 'H5public.h'), 'include')
    shutil.copy(os.path.join('hdf5', 'src', 'H5pubconf.h'), 'include')
    shutil.copy(os.path.join('hdf5', 'src', 'H5version.h'), 'include')

    # Get the sources for the ndf and hds dependencies.
    hds_source_dep = []
    for name_ in HDS_DEP_LIBS:
        hds_source_dep += get_source(name_)
    ndf_source_dep = []
    for name_ in ['prm', 'ast', 'ary']:
        ndf_source_dep += get_source(name_)

    hdsex_includedirs = [
        'include', 'hds', 'missingincludes', 'hds_missingincludes',
        os.path.join('hdf5', 'src'), os.path.join('hdf5', 'hl', 'src'),
        'starutil', 'starmem', 'cnf', 'ems', 'mers', 'chr', 'one',
        numpy.get_include()
    ]
    from starlink import Ast
    ndfex_includedirs = hdsex_includedirs + \
        ['prm', 'ast', 'ary', 'ast_missingincludes', Ast.get_include()]

    define_macros = get_starlink_macros(osx=osx)

    # Now build all.
    # This is the directory where the extra library's built here
    # have to be copied to, relative to the final build. This must
    # be called '.libs' if you want to use this with
    # cibuildwheel/auditwheel.
    extra_lib_dir = '.libs'

    # Get the compilers.
    # compiler (possibly dry-run) is used for compiling; compiler2 for
    # linking and library naming.
    compiler = ccompiler.new_compiler(dry_run=FAKEBUILDING, verbose=0)
    compiler2 = ccompiler.new_compiler(verbose=1)

    # Ensure we have any distutils options set.
    customize_compiler(compiler)
    customize_compiler(compiler2)

    # Now go through each extension, build the shared libraries we
    # need and ensure they are copied to the build directory. We
    # will use rpath and $ORIGIN to ensure everything is portable
    # as it will be moved around during the build process by pip
    # etc.
    for ext in self.extensions:
        linked_libraries = []
        if ext.name == 'starlink.hds':
            hds_deps = compiler.compile(sources=hds_source_dep,
                                        output_dir=OUTPUTDIR,
                                        macros=define_macros,
                                        include_dirs=HDS_DEP_INCLUDES,
                                        depends=hds_source_dep,
                                        )
            hds_deps_libname = compiler2.library_filename(
                'pyhdsdeps', lib_type=libtype)

            # Build this into a library
            print('Linking HDSDEPS\n\n\n')
            extra_preargs = None
            if osx:
                extra_preargs = ['-v', '-Wl,-v',
                                 install_name_pattern.format(hds_deps_libname)]
            compiler2.link('shared', hds_deps, hds_deps_libname,
                           output_dir=OUTPUTDIR,
                           extra_preargs=extra_preargs,
                           extra_postargs=None)
            linked_libraries += [os.path.join(OUTPUTDIR, hds_deps_libname)]

            # Now build hds-v4 and hds-v5: have to do this separately.
            hdsv4_libname = compiler.library_filename('pyhdsv4',
                                                      lib_type=libtype)
            hdsv4objs = compiler.compile(
                sources=get_source('hds-v4'),
                output_dir=OUTPUTDIR,
                macros=define_macros,
                include_dirs=('hds-v4_missingincludes',) + HDS_DEP_INCLUDES,
                depends=get_source('hds-v4'))
            extra_preargs = None
            if osx:
                extra_preargs = [install_name_pattern.format(hdsv4_libname)]
            print('Linking HDSV4\n\n\n')
            compiler2.link('shared', hdsv4objs, hdsv4_libname,
                           output_dir=OUTPUTDIR,
                           extra_preargs=extra_preargs,
                           target_lang='c')
            linked_libraries += [os.path.join(OUTPUTDIR, hdsv4_libname)]

            print('CREATING HDF5 LIBRARY\n\n\n')
            # Create the HDF5 library
            # Reuse the objects produced by the autotools build above.
            hdf5_libpath = os.path.join('hdf5', 'src', '.libs')
            hdf5_objects = glob.glob(os.path.join(hdf5_libpath, '*.o'))
            hdf5_libname = compiler.library_filename('pystarhdf5',
                                                     lib_type=libtype)
            extra_preargs = None
            if osx:
                extra_preargs = [install_name_pattern.format(hdf5_libname)]
            compiler2.link('shared', hdf5_objects, hdf5_libname,
                           output_dir=OUTPUTDIR,
                           library_dirs=[OUTPUTDIR],
                           runtime_library_dirs=rdirs,
                           extra_postargs=ldirs,
                           extra_preargs=extra_preargs)
            linked_libraries += [os.path.join(OUTPUTDIR, hdf5_libname)]

            hdsv5_libname = compiler2.library_filename('pyhdsv5',
                                                       lib_type=libtype)
            hdsv5objs = compiler.compile(
                sources=get_source('hds-v5'),
                output_dir=OUTPUTDIR,
                macros=define_macros,
                include_dirs=('hds-v5_missingincludes',) + HDS_DEP_INCLUDES,
                depends=get_source('hds-v5'))
            extra_preargs = None
            if osx:
                extra_preargs = [install_name_pattern.format(hdsv5_libname)]
            compiler2.link('shared', hdsv5objs, hdsv5_libname,
                           output_dir=OUTPUTDIR,
                           libraries=['pystarhdf5'],
                           library_dirs=[OUTPUTDIR],
                           runtime_library_dirs=rdirs,
                           extra_postargs=ldirs,
                           extra_preargs=extra_preargs)
            linked_libraries += [os.path.join(OUTPUTDIR, hdsv5_libname)]

            hds_libname = compiler2.library_filename('pyhds',
                                                     lib_type=libtype)
            hdsobjs = compiler.compile(sources=get_source('hds'),
                                       output_dir=OUTPUTDIR,
                                       macros=define_macros,
                                       include_dirs=hdsex_includedirs,
                                       depends=get_source('hds'))
            extra_preargs = None
            if osx:
                extra_preargs = [install_name_pattern.format(hds_libname)]
            compiler2.link('shared', hdsobjs, hds_libname,
                           output_dir=OUTPUTDIR,
                           libraries=['pyhdsdeps', 'pyhdsv5', 'pyhdsv4'],
                           library_dirs=[OUTPUTDIR],
                           runtime_library_dirs=rdirs,
                           extra_postargs=ldirs,
                           extra_preargs=extra_preargs)
            linked_libraries += [os.path.join(OUTPUTDIR, hds_libname)]

            ext.libraries += ['pyhds']
            ext.library_dirs += [OUTPUTDIR]
            if ext_runtime_library_dirs:
                ext.runtime_library_dirs += [
                    ext_runtime_library_dirs.format(extra_lib_dir)]
            if ext_extra_link_args:
                ext.extra_link_args += [
                    ext_extra_link_args.format(extra_lib_dir)]

        if ext.name == 'starlink.ndf':
            ndf_libname = compiler2.library_filename('pyndf',
                                                     lib_type=libtype)
            ndfobjs = compiler.compile(
                sources=get_source('ndf') + ndf_source_dep,
                include_dirs=['ndf/', 'ndf_missingincludes/'] + ndfex_includedirs,
                macros=define_macros,
                depends=get_source('ndf'))
            extra_preargs = None
            if osx:
                extra_preargs = [install_name_pattern.format(ndf_libname)]
            compiler2.link('shared', ndfobjs, ndf_libname,
                           output_dir=OUTPUTDIR,
                           libraries=['pyhdsdeps', 'pyhdsv5', 'pyhdsv4', 'pyhds'],
                           library_dirs=[OUTPUTDIR],
                           runtime_library_dirs=rdirs,
                           extra_postargs=ldirs,
                           extra_preargs=extra_preargs)
            linked_libraries += [os.path.join(OUTPUTDIR,
                                              ndf_libname)]
            ext.libraries += ['pyndf']
            ext.library_dirs += [OUTPUTDIR]
            if ext_runtime_library_dirs:
                ext.runtime_library_dirs += [
                    ext_runtime_library_dirs.format(extra_lib_dir)]
            if ext_extra_link_args:
                ext.extra_link_args += [
                    ext_extra_link_args.format(extra_lib_dir)]

        # Copy over the libraries to the build directory manually, and add to package data.
        if not os.path.isdir(os.path.join(self.build_lib, 'starlink',
                                          extra_lib_dir)):
            os.mkdir(os.path.join(self.build_lib, 'starlink', extra_lib_dir))
        for lib in linked_libraries:
            shutil.copy(lib, os.path.join(self.build_lib, 'starlink',
                                          extra_lib_dir))
            output_lib = os.path.join('starlink', extra_lib_dir,
                                      os.path.split(lib)[1])
            # NOTE(review): .get('starlink', list()) returns a throwaway
            # default when the key is missing, and extend() on a str adds
            # it character-by-character — looks like this should be
            # setdefault('starlink', []).append(output_lib); confirm.
            self.distribution.package_data.get('starlink',
                                               list()).extend(output_lib)

    # Run the standard build_ext process.
    build_ext.run(self)
#! /usr/bin/env python # BSD 2-Clause License # Copyright (c) 2016, Roberto Souza and collaborators # All rights reserved. # System imports from distutils.core import * from distutils import sysconfig import os (opt,) = sysconfig.get_config_vars('OPT') os.environ['OPT'] = " ".join(flag for flag in opt.split() if flag != '-Wstrict-prototypes') # Third-party modules - we depend on numpy for everything import numpy # Obtain the numpy include directory. This logic works across numpy versions. try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() # get_numpy_include() def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read()
def prep_build(self, ext):
    """Run the one-time CMake configure + build for the libcouchbase
    extension: assemble cmake arguments (Python paths, SSL fetch mode,
    platform generator flags), locate the Python library file/dir, invoke
    cmake, then (in hybrid mode) copy the built binaries into the package
    build tree.  Guarded by the CMakeBuild.hasbuilt class flag."""
    if not CMakeBuild.hasbuilt:
        from distutils.sysconfig import get_python_inc
        import distutils.sysconfig as sysconfig
        cfg = self.cfg_type()
        extdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))
        lcb_api_flags = self.get_lcb_api_flags()
        cmake_args = lcb_api_flags + [
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
            '-DPYTHON_EXECUTABLE=' + sys.executable]
        cmake_args += ['-DPYTHON_INCLUDE_DIR={}'.format(get_python_inc())]
        self.info.setbase(self.build_temp)
        self.info.cfg = cfg
        from distutils import sysconfig
        import os.path as op
        v = sysconfig.get_config_vars()
        print("LIBDIR {}, LIBPL {}".format(v.get("LIBDIR"), v.get("LIBPL")))
        # Candidate locations for the libpython shared library: the
        # configured LIBDIR/LIBPL plus guesses relative to the include dir
        # (the .dylib guess targets macOS framework layouts).
        fpaths = [op.join(v.get(pv, ''), v.get('LDLIBRARY', ''))
                  for pv in ('LIBDIR', 'LIBPL')] + [
            os.path.normpath(os.path.join(
                get_python_inc(), "..", "..", "lib",
                "libpython{}.dylib".format(
                    '.'.join(map(str, sys.version_info[0:2]))))),
            os.path.join(get_python_inc(), "..", "..", "lib")]
        python_lib = None
        python_libdir = None
        for entry in fpaths:
            if not op.exists(entry):
                print("fpath {} does not exist".format(entry))
                continue
            try:
                print("got fpath {}:".format(entry))
                if op.isfile(entry):
                    print("fpath {} is file, selecting".format(entry))
                    # Keep the first hit only ("python_lib or ...").
                    python_lib = python_lib or entry
                    continue
                else:
                    entries = os.listdir(entry)
                    print("fpath {} is directory, contents {}".format(
                        entry, entries))
                    for subentry in entries:
                        fullname = op.normpath(op.join(entry, subentry))
                        try:
                            # Resolve symlinks to the real library file.
                            fullname = op.readlink(fullname)
                        except:
                            pass
                        print("trying subentry:{}".format(fullname))
                        if op.exists(fullname):
                            python_lib = python_lib or fullname
                            python_libdir = op.normpath(entry)
                            print("got match {}, breaking out".format(
                                fullname))
                            # NOTE(review): comment says "breaking out" but
                            # this is a continue — the loop scans all
                            # subentries; confirm intent.
                            continue
            except:
                pass
        cmake_args += ['-DHYBRID_BUILD=TRUE'] if CMakeBuild.hybrid else []
        cmake_args += ['-DPYTHON_LIBFILE={}'.format(python_lib)] if python_lib else []
        cmake_args += ['-DPYTHON_LIBDIR={}'.format(python_libdir)] if python_libdir else []
        cmake_args += [
            '-DPYTHON_VERSION_EXACT={}'.format(
                '.'.join(map(str, sys.version_info[0:2])))] if python_libdir else []
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
                cfg.upper(), extdir),
                '-DLCB_NO_MOCK=1']
            if sys.maxsize > 2 ** 32:
                # 64-bit Python: ask for the x64 generator platform.
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg.upper()]
            build_args += ['--', '-j2']
        env = os.environ.copy()
        python_executable = win_cmake_path(sys.executable)
        # When SSL sources are fetched via conan/github, the cmake child
        # process needs our PYTHONPATH (and conan on PATH).
        pass_path = False
        if re.match(r'.*(CONAN|ALL).*', PYCBC_SSL_FETCH):
            try:
                import conans.conan
                env['PATH'] = env['PATH'] + ";{}".format(
                    os.path.dirname(conans.conan.__file__))
                pass_path = True
            except:
                logging.warning("Cannot find conan : {}".format(
                    traceback.format_exc()))
        if re.match(r'.*(GITHUB|ALL).*', PYCBC_SSL_FETCH):
            pass_path = True
        if pass_path:
            pathsep = ';' if platform.system().lower().startswith('win') else ':'
            env['PYTHONPATH'] = pathsep.join(sys.path)
        cmake_args += [
            '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON',
            '-DPYTHON_EXECUTABLE={}'.format(python_executable)]
        if PYCBC_SSL_FETCH:
            cmake_args += ['-DPYCBC_SSL_FETCH={}'.format(PYCBC_SSL_FETCH)]
        PYCBC_CMAKE_DEBUG = env.get('PYCBC_CMAKE_DEBUG')
        if PYCBC_CMAKE_DEBUG:
            cmake_args += [
                '--trace-source=CMakeLists.txt',
                '--trace-expand']
        # Strip -std=c11/-std=c99 from the C++ flags (C-only options).
        cxx_compile_args = filter(
            re.compile(r'^(?!-std\s*=\s*c(11|99)).*').match,
            ext.extra_compile_args)
        env['CXXFLAGS'] = '{} {} -DVERSION_INFO=\\"{}\\"'.format(
            env.get('CXXFLAGS', ''), ' '.join(cxx_compile_args),
            self.distribution.get_version())
        # NOTE(review): the trailing get_version() argument has no matching
        # placeholder in this format string and is silently dropped.
        env['CFLAGS'] = '{} {}'.format(
            env.get('CFLAGS', ''), ' '.join(ext.extra_compile_args),
            self.distribution.get_version())
        print("Launching build with env: {}, build_args: {}, cmake_args: {}".format(
            env, build_args, cmake_args))
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
                              stdout=sys.stdout, stderr=sys.stdout,
                              cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args,
                              cwd=self.build_temp)
        CMakeBuild.hasbuilt = True
        build_dir = os.path.realpath(self.build_lib)
        if CMakeBuild.hybrid:
            if not self.compiler:
                self.run()
            for name in self.info.entries():
                try:
                    pkg_build_dir = os.path.join(
                        build_dir, cbuild_config.couchbase_core)
                    self.copy_binary_to(cfg, pkg_build_dir,
                                        self.info.lcb_pkgs_srcs(), name)
                    self.copy_binary_to(cfg, self.info.pkg_data_dir,
                                        self.info.lcb_pkgs_srcs(), name)
                except:
                    print("failure")
                    raise
def setup():
    """Configure and run the xdress package installation.

    Builds the optional ``xdress.clang.libclang`` extension when
    ``llvm-config`` (or the LLVM_CPPFLAGS/LLVM_LDFLAGS/CLANG_LIBS
    environment variables) is available, then invokes setuptools'
    (falling back to distutils') ``setup()`` from the package directory.
    """
    try:
        from setuptools import setup as setup_, Extension
    except ImportError:
        from distutils.core import setup as setup_, Extension
    from distutils.spawn import find_executable
    from distutils.sysconfig import get_config_vars

    scripts_dir = os.path.join(dir_name, 'scripts')
    # Windows keeps the .bat wrappers; other platforms drop them.
    if os.name == 'nt':
        scripts = [os.path.join(scripts_dir, f)
                   for f in os.listdir(scripts_dir)]
    else:
        scripts = [os.path.join(scripts_dir, f)
                   for f in os.listdir(scripts_dir)
                   if not f.endswith('.bat')]
    packages = ['xdress', 'xdress.clang', 'xdress._enum']
    pack_dir = {'xdress': 'xdress',
                'xdress.clang': 'xdress/clang',
                'xdress._enum': 'xdress/_enum'}
    pack_data = {'xdress': ['*.pxd', '*.pyx', '*.h', '*.cpp'],
                 'xdress._enum': ['LICENSE', 'README']}

    # llvm+clang configuration can be controlled by the environment variables
    # LLVM_CONFIG, LLVM_CPPFLAGS, LLVM_LDFLAGS, and CLANG_LIBS. LLVM_CONFIG is
    # not used if both LLVM_CPPFLAGS and LLVM_LDFLAGS are set.
    if 'LLVM_CPPFLAGS' in os.environ and 'LLVM_LDFLAGS' in os.environ:
        llvm_config = True  # Will be unused below
    else:
        if 'LLVM_CONFIG' in os.environ:
            llvm_config = os.environ['LLVM_CONFIG']
        else:
            options = 'llvm-config llvm-config-3.5 llvm-config-3.4 llvm-config-3.3 llvm-config-3.2'.split()
            for p in options:
                p = find_executable(p)
                if p is not None:
                    print('using llvm-config from %s' % p)
                    llvm_config = p
                    break
            else:
                print('Disabling clang since llvm-config not found: tried %s' % ', '.join(options))
                print('To override, set the LLVM_CONFIG environment variable.')
                llvm_config = None

    if llvm_config is not None:
        try:
            # NOTE(review): on Python 3, check_output returns bytes, so the
            # flags are bytes when the env vars are unset — confirm callers
            # tolerate that (behavior kept as-is).
            llvm_cppflags = (
                os.environ.get('LLVM_CPPFLAGS') or
                subprocess.check_output([llvm_config, '--cppflags'])).split()
            llvm_ldflags = (
                os.environ.get('LLVM_LDFLAGS') or
                subprocess.check_output([llvm_config, '--ldflags', '--libs'])).split()
        except OSError as e:
            raise OSError("Failed to run llvm-config program '%s': %s"
                          % (llvm_config, e))
        clang_dir = os.path.join(dir_name, 'xdress', 'clang')
        clang_src_dir = os.path.join(clang_dir, 'src')
        clang_libs = (
            os.environ.get('CLANG_LIBS') or
            '''clangTooling clangFrontend clangDriver clangSerialization
            clangCodeGen clangParse clangSema clangStaticAnalyzerFrontend
            clangStaticAnalyzerCheckers clangStaticAnalyzerCore clangAnalysis
            clangARCMigrate clangEdit clangRewriteCore clangAST clangLex
            clangBasic''').split()
        # If the user sets CFLAGS, make sure we still have our own include path first
        if 'CFLAGS' in os.environ:
            os.environ['CFLAGS'] = '-I%s ' % clang_dir + os.environ['CFLAGS']
        # Remove -Wstrict-prototypes to prevent warnings in libclang C++ code,
        # following http://stackoverflow.com/questions/8106258.
        opt, = get_config_vars('OPT')
        os.environ['OPT'] = ' '.join(f for f in opt.split()
                                     if f != '-Wstrict-prototypes')
        modules = [Extension('xdress.clang.libclang',
                             sources=glob.glob(os.path.join(clang_src_dir, '*.cpp')),
                             define_macros=[('XDRESS', 1)],
                             include_dirs=[clang_dir],
                             extra_compile_args=llvm_cppflags + ['-fno-rtti'],
                             extra_link_args=llvm_ldflags,
                             libraries=clang_libs,
                             language='c++')]
    else:
        modules = ()

    setup_kwargs = {
        "name": "xdress",
        "version": INFO['version'],
        # BUG FIX: "description" used to appear twice in this dict literal;
        # the first value ('xdress') was silently discarded, so only the
        # real description is kept.
        "author": 'Anthony Scopatz',
        "author_email": '*****@*****.**',
        "url": 'http://xdress.org/',
        "packages": packages,
        "package_dir": pack_dir,
        "package_data": pack_data,
        "ext_modules": modules,
        "scripts": scripts,
        "description": ("Cython-based, NumPy-aware automatic wrapper generation for "
                        "C / C++."),
        "long_description": long_desc,
        "download_url": ("https://github.com/scopatz/xdress/"
                         "zipball/{0}.{1}").format(*xdress.version.xdress_version[:2]),
        "classifiers": [
            "License :: OSI Approved :: BSD License",
            "Intended Audience :: Developers",
            "Intended Audience :: Science/Research",
            "Programming Language :: C",
            "Programming Language :: C++",
            "Programming Language :: Cython",
            "Programming Language :: Python :: 2",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: Implementation :: CPython",
            "Topic :: Scientific/Engineering",
            "Topic :: Software Development :: Code Generators",
            "Topic :: Software Development :: Compilers",
            "Topic :: Utilities",
            ],
        "data_files": [("", ['license', 'configure.py']), ],
        }
    # changing dirs for virtualenv.
    # BUG FIX: previously a failure inside setup_() left the process
    # chdir'ed into dir_name; restore the original cwd unconditionally.
    cwd = os.getcwd()
    os.chdir(dir_name)
    try:
        setup_(**setup_kwargs)
    finally:
        os.chdir(cwd)
# Tail of a python-config-style command-line helper; the matching `try:`
# for this handler is outside the visible chunk.
except ImportError:
    # No information available
    print("none")
    sys.exit(1)
# --includes: print the -I flags needed to compile against this Python's
# headers (the platform-specific include dir can differ from the generic one).
if sys.argv[1] == '--includes':
    inc = sysconfig.get_python_inc()
    plat = sysconfig.get_python_inc(plat_specific=1)
    if inc == plat:
        print("-I" + inc)
    else:
        print("-I%s -I%s" % (inc, plat))
    sys.exit(0)
# --compile: print the C compiler command and the extra flags used for
# shared-object compiles, taken from the interpreter's build configuration.
if sys.argv[1] == '--compile':
    cc, ccshared = sysconfig.get_config_vars('CC', 'CCSHARED')
    print("%s %s" % (cc, ccshared))
    sys.exit(0)


def add_option(options, name, value=None):
    """Add option to list of options"""
    options.append(name)
    # Options that take an argument append it as a separate list element.
    if value is not None:
        options.append(value)


def add_option_if_missing(options, name, value=None):
    """Add option to list of options, if it is not already present"""
    # A linker option may already be present in "-Wl,<name>" form; treat
    # that spelling as equivalent to the bare <name>.
    if options.count(name) == 0 and options.count("-Wl,%s" % name) == 0:
        add_option(options, name, value)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories
    and platform-specific built distributions.  Typically includes the OS
    name and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i + len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    (osname, host, release, version, machine) = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":  # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":  # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile(r'[\d.]+', re.ASCII)
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        from distutils.sysconfig import get_config_vars
        cfgvars = get_config_vars()

        macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
        if not macver:
            macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        # Always calculate the release of the running machine, needed to
        # determine if we can build fat binaries or not.  (The original
        # wrapped this in a vestigial `if 1:` block, removed here.)
        macrelease = macver
        # Get the system version. Reading this plist is a documented
        # way to get the system version (see the documentation for
        # the Gestalt Manager)
        try:
            f = open('/System/Library/CoreServices/SystemVersion.plist')
        except IOError:
            # We're on a plain darwin box, fall back to the default
            # behaviour.
            pass
        else:
            # BUG FIX: close the file even if re.search/read raises.
            try:
                m = re.search(
                    r'<key>ProductUserVisibleVersion</key>\s*'
                    r'<string>(.*?)</string>', f.read())
            finally:
                f.close()
            if m is not None:
                macrelease = '.'.join(m.group(1).split('.')[:2])
            # else: fall back to the default behaviour

        if not macver:
            macver = macrelease

        if macver:
            release = macver
            osname = "macosx"
            # Hoisted: fetch CFLAGS once instead of calling
            # get_config_vars() three times.
            cflags = cfgvars.get('CFLAGS', '')
            if (macrelease + '.') >= '10.4.' and '-arch' in cflags.strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have
                # machine-type 'universal' instead of 'fat'.
                machine = 'fat'
                # BUG FIX: raw string for the regex — '\s'/'\S' in a plain
                # literal is an invalid escape sequence on modern Python.
                archs = re.findall(r'-arch\s+(\S+)', cflags)
                archs.sort()
                archs = tuple(archs)

                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs, ))
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                machine = 'ppc'

    return "%s-%s-%s" % (osname, release, machine)
lib_dirs = ReadConfig('build_ext', 'library_dirs', sysconfig.get_config_var('LIBDIR')).split(':') extra_compile_args = ReadConfig('cc_options', 'extra_compile_args', '').split() extra_link_args = ReadConfig('cc_options', 'extra_link_args', '').split() static_libs = [] deps = ['libgflags.a', 'libglog.a'] for dep in deps: for lib_dir in lib_dirs: path = os.path.join(lib_dir, dep) if os.path.isfile(path): static_libs.append(path) assert len(static_libs) == len(deps), (static_libs, deps, lib_dirs) cvars = sysconfig.get_config_vars() cvars['OPT'] = str.join( ' ', RemovePrefixes( cvars.get('OPT').split(), ['-g', '-O', '-Wstrict-prototypes'])) # Determine the current version of the package. The easiest way would be to # import "googleclouddebugger" and read its __version__ attribute. # Unfortunately we can't do that because "googleclouddebugger" depends on # "cdbg_native" that hasn't been built yet. version = None with open('googleclouddebugger/version.py', 'r') as version_file: version_pattern = re.compile(r"^\s*__version__\s*=\s*'([0-9.]*)'") for line in version_file: match = version_pattern.match(line) if match:
class CompilerDetection(object):
    """Probe the host C compiler for OpenMP and SSE support.

    ``initialize()`` populates ``compiler_args_*`` / ``define_macros_*`` /
    ``compiler_libraries_*`` attributes that callers can splice into
    extension definitions.  Each probe compiles a tiny throwaway program in
    a subprocess, so detection never pollutes the current process's state.
    """

    # Necessary for OSX. See https://github.com/mdtraj/mdtraj/issues/576
    # The problem is that distutils.sysconfig.customize_compiler()
    # is necessary to properly invoke the correct compiler for this class
    # (otherwise the CC env variable isn't respected). Unfortunately,
    # distutils.sysconfig.customize_compiler() DIES on OSX unless some
    # appropriate initialization routines have been called. This line
    # has a side effect of calling those initialzation routes, and is therefor
    # necessary for OSX, even though we don't use the result.
    _DONT_REMOVE_ME = get_config_vars()

    def __init__(self, disable_openmp):
        # disable_openmp: when True, initialize() skips OpenMP detection.
        self.disable_openmp = disable_openmp
        self._is_initialized = False

    def initialize(self):
        """Run every compiler probe once and cache the resulting flag sets."""
        if self._is_initialized:
            return

        cc = new_compiler()
        customize_compiler(cc)
        self.msvc = cc.compiler_type == 'msvc'
        self._print_compiler_version(cc)
        if self.disable_openmp:
            self.openmp_enabled = False
        else:
            self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
        # MSVC is assumed to support SSE3/SSE4.1; only other compilers are probed.
        self.sse3_enabled = self._detect_sse3() if not self.msvc else True
        self.sse41_enabled = self._detect_sse41() if not self.msvc else True

        # Flag spellings differ between MSVC (/...) and gcc/clang (-...).
        self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
        self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []
        self.compiler_args_warn = ['-Wno-unused-function', '-Wno-unreachable-code', '-Wno-sign-compare'] if not self.msvc else []

        self.compiler_args_sse41, self.define_macros_sse41 = [], []
        if self.sse41_enabled:
            self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
            if not self.msvc:
                self.compiler_args_sse41 = ['-msse4']

        if self.openmp_enabled:
            self.compiler_libraries_openmp = []
            if self.msvc:
                self.compiler_args_openmp = ['/openmp']
            else:
                self.compiler_args_openmp = ['-fopenmp']
                # Some toolchains need an explicit link against libgomp.
                if openmp_needs_gomp:
                    self.compiler_libraries_openmp = ['gomp']
        else:
            self.compiler_libraries_openmp = []
            self.compiler_args_openmp = []

        if self.msvc:
            self.compiler_args_opt = ['/O2']
        else:
            self.compiler_args_opt = ['-O3', '-funroll-loops']
        print()
        self._is_initialized = True

    def _print_compiler_version(self, cc):
        """Best-effort: spawn the compiler so it prints its banner; ignore failures."""
        print("C compiler:")
        try:
            if self.msvc:
                if not cc.initialized:
                    cc.initialize()
                cc.spawn([cc.cc])
            else:
                cc.spawn([cc.compiler[0]] + ['-v'])
        except DistutilsExecError:
            # NOTE(review): DistutilsExecError is presumably imported at the
            # top of this file — confirm.
            pass

    def hasfunction(self, funcname, include=None, libraries=None, extra_postargs=None):
        """Return True when *funcname* compiles and links cleanly.

        Generates a throwaway script (run with the current interpreter)
        that writes func.c containing a call to *funcname*, optionally
        preceded by ``#include INCLUDE``, compiles it with
        *extra_postargs* and links it against *libraries*.  The
        subprocess's exit status is the verdict.
        """
        # running in a separate subshell lets us prevent unwanted stdout/stderr
        part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars

FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
            # JSON round-trip safely quotes the values into the script text.
            'funcname': json.dumps(funcname),
            'include': json.dumps(include),
            'libraries': json.dumps(libraries or []),
            'extra_postargs': json.dumps(extra_postargs)}
        part2 = '''
get_config_vars() # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)

for library in LIBRARIES:
    cc.add_library(library)

status = 0
try:
    with open('func.c', 'w') as f:
        if INCLUDE is not None:
            f.write('#include %s\\n' % INCLUDE)
        f.write('int main(void) {\\n')
        f.write(' %s;\\n' % FUNCNAME)
        f.write('}\\n')
    objects = cc.compile(['func.c'], output_dir='.',
                         extra_postargs=EXTRA_POSTARGS)
    cc.link_executable(objects, 'a.out')
except Exception as e:
    status = 1
exit(status)
'''
        # Work in a temp dir so compiler droppings never land in the source tree.
        tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
        try:
            curdir = os.path.abspath(os.curdir)
            os.chdir(tmpdir)
            with open('script.py', 'w') as f:
                f.write(part1 + part2)
            proc = subprocess.Popen(
                [sys.executable, 'script.py'],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            proc.communicate()
            status = proc.wait()
        finally:
            # Always restore the cwd and remove the scratch directory.
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
        return status == 0

    def _print_support_start(self, feature):
        """Print the 'probing <feature>...' prefix without a newline."""
        print('Attempting to autodetect {0:6} support...'.format(feature), end=' ')

    def _print_support_end(self, feature, status):
        """Print the probe verdict for *feature*."""
        if status is True:
            print('Compiler supports {0}'.format(feature))
        else:
            print('Did not detect {0} support'.format(feature))

    def _detect_openmp(self):
        """Return (openmp_supported, needs_explicit_libgomp)."""
        self._print_support_start('OpenMP')
        # Both the gcc and MSVC spellings are passed; the compiler ignores
        # the one it does not understand.
        hasopenmp = self.hasfunction('omp_get_num_threads()', extra_postargs=['-fopenmp', '/openmp'])
        needs_gomp = hasopenmp
        if not hasopenmp:
            # Retry with an explicit libgomp link.
            hasopenmp = self.hasfunction('omp_get_num_threads()', libraries=['gomp'])
            needs_gomp = hasopenmp
        self._print_support_end('OpenMP', hasopenmp)
        return hasopenmp, needs_gomp

    def _detect_sse3(self):
        "Does this compiler support SSE3 intrinsics?"
        self._print_support_start('SSE3')
        result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
                                  include='<pmmintrin.h>',
                                  extra_postargs=['-msse3'])
        self._print_support_end('SSE3', result)
        return result

    def _detect_sse41(self):
        "Does this compiler support SSE4.1 intrinsics?"
        self._print_support_start('SSE4.1')
        result = self.hasfunction(
            '__m128 v; _mm_round_ps(v,0x00)',
            include='<smmintrin.h>',
            extra_postargs=['-msse4'])
        self._print_support_end('SSE4.1', result)
        return result
CHANGES = f.read() extension_options = { 'sources': ['./rapidjson.cpp'], 'include_dirs': [rj_include_dir], 'define_macros': [('PYTHON_RAPIDJSON_VERSION', VERSION)], } cxx = sysconfig.get_config_var('CXX') if cxx and 'g++' in cxx: # Avoid warning about invalid flag for C++ for varname in ('CFLAGS', 'OPT'): value = sysconfig.get_config_var(varname) if value and '-Wstrict-prototypes' in value: value = value.replace('-Wstrict-prototypes', '') sysconfig.get_config_vars()[varname] = value # Add -pedantic, so we get a warning when using non-standard features, and # -Wno-long-long to pacify old gcc (or Apple's hybrids) that treat "long # long" as an error under C++ (see issue #69) extension_options['extra_compile_args'] = ['-pedantic', '-Wno-long-long'] # Up to Python 3.7, some structures use "char*" instead of "const char*", # and ISO C++ forbids assigning string literal constants if sys.version_info < (3, 7): extension_options['extra_compile_args'].append('-Wno-write-strings') setup(name='python-rapidjson', version=VERSION, description='Python wrapper around rapidjson', long_description=LONG_DESCRIPTION + '\n\n' + CHANGES,
raise ValueError("Version string not found") VERSION = get_version() if (sys.platform.startswith('linux') or sys.platform.startswith('cygwin') or sys.platform.startswith('gnukfreebsd')): ostype = 'linux' so_ext = '.so' LD_LIBRARY_PATH = 'LD_LIBRARY_PATH' elif sys.platform.startswith('darwin'): ostype = 'mac' so_ext = '.dylib' LD_LIBRARY_PATH = 'DYLD_LIBRARY_PATH' from distutils.sysconfig import get_config_vars conf_vars = get_config_vars() # setuptools/pip install by default generate "bundled" library. Bundled # library cannot be linked at compile time # https://stackoverflow.com/questions/24519863/what-are-the-g-flags-to-build-a-true-so-mh-bundle-shared-library-on-mac-osx # configs LDSHARED and CCSHARED and SO are hard coded in lib/pythonX.X/_sysconfigdata.py # In some Python version, steuptools may correct these configs for OS X on the # fly by _customize_compiler_for_shlib function or setup_shlib_compiler function # in lib/pythonX.X/site-packages/setuptools/command/build_ext.py. # The hacks below ensures that the OS X compiler does not generate bundle # libraries. Relevant code: # lib/pythonX.X/_sysconfigdata.py # lib/pythonX.X/distutils/command/build_ext.py # lib/pythonX.X/distutils/sysconfig.py, get_config_vars() # lib/pythonX.X/distutils/ccompiler.py, link_shared_object() # lib/pythonX.X/distutils/unixcompiler.py, link() conf_vars['LDSHARED'] = conf_vars['LDSHARED'].replace(
def hasfunction(self, funcname, include=None, libraries=None,
                extra_postargs=None):
    """Return True when *funcname* compiles and links with the host C compiler.

    Generates a throwaway script (run with the current interpreter) that
    writes func.c containing a call to *funcname*, optionally preceded by
    ``#include INCLUDE``, compiles it with *extra_postargs* and links it
    against *libraries*.  The subprocess's exit status is the verdict.
    """
    # running in a separate subshell lets us prevent unwanted stdout/stderr
    part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars

FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
        # JSON round-trip safely quotes the arguments into the script text.
        'funcname': json.dumps(funcname),
        'include': json.dumps(include),
        'libraries': json.dumps(libraries or []),
        'extra_postargs': json.dumps(extra_postargs)
    }
    part2 = '''
get_config_vars() # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)

for library in LIBRARIES:
    cc.add_library(library)

status = 0
try:
    with open('func.c', 'w') as f:
        if INCLUDE is not None:
            f.write('#include %s\\n' % INCLUDE)
        f.write('int main(void) {\\n')
        f.write(' %s;\\n' % FUNCNAME)
        f.write('}\\n')
    objects = cc.compile(['func.c'], output_dir='.',
                         extra_postargs=EXTRA_POSTARGS)
    cc.link_executable(objects, 'a.out')
except Exception as e:
    status = 1
exit(status)
'''
    # Work in a temp dir so compiler droppings never land in the source tree.
    tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
    try:
        curdir = os.path.abspath(os.curdir)
        os.chdir(tmpdir)
        with open('script.py', 'w') as f:
            f.write(part1 + part2)
        proc = subprocess.Popen([sys.executable, 'script.py'],
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        proc.communicate()
        status = proc.wait()
    finally:
        # Always restore the cwd and remove the scratch directory.
        os.chdir(curdir)
        shutil.rmtree(tmpdir)
    return status == 0
from distutils import sysconfig
import sys
import os
import platform
import versioneer

# Minimum/maximum supported dependency versions for this build.
min_python_version = "3.6"
min_numpy_build_version = "1.11"
min_numpy_run_version = "1.15"
min_llvmlite_version = "0.31.0.dev0"
max_llvmlite_version = "0.33.0.dev0"

if sys.platform.startswith('linux'):
    # Patch for #2555 to make wheels without libpython
    sysconfig.get_config_vars()['Py_ENABLE_SHARED'] = 0


class build_doc(build.build):
    """Build command that produces the HTML docs via the docs/ Makefile."""
    # NOTE(review): `build` and `spawn` are presumably imported elsewhere
    # in this file (distutils.command.build / distutils.spawn) — confirm.
    description = "build documentation"

    def run(self):
        spawn(['make', '-C', 'docs', 'html'])


# versioneer configuration: version strings are derived from git tags.
versioneer.VCS = 'git'
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
def GetPythonPath(self):
    """Detect the active Python's interpreter name, include path, library
    directory and extension suffix, storing the results on ``self``.

    Sets: ``python_version``, ``py_major``/``py_minor``/``py_micro``,
    ``python_interpreter``, ``python_implementation``,
    ``python_include_path``, ``python_ld_path`` and ``extension_postfix``.

    Raises:
        RuntimeError: if the Python version is unsupported or libpython
            cannot be located.
    """
    if self._pwd_ is None:
        self._pwd_ = os.path.dirname(os.path.realpath('__file__'))

    # Get python version
    self.python_version = sys.version_info
    if self.python_version[:2] < (2, 7) or (
            3, 0) <= self.python_version[:2] < (3, 5):
        raise RuntimeError("Python version 2.7 or >= 3.5 required.")
    self.py_major = self.python_version.major
    self.py_minor = self.python_version.minor
    self.py_micro = self.python_version.micro
    self.python_interpreter = 'python' + str(self.py_major) + '.' + str(
        self.py_minor)
    # CPython 3.x appends the ABI flags ('m', 'dm', ...) to the library name.
    with_pymalloc = ""
    if get_config_var("ABIFLAGS") is not None:
        with_pymalloc = get_config_var("ABIFLAGS")
    self.python_interpreter += with_pymalloc

    # Get python implementation e.g. CPython, PyPy etc
    self.python_implementation = platform.python_implementation()

    # Get python include path
    self.python_include_path = get_python_inc()

    # Get python lib path
    # Note that we need the actual ld path where libpython.so/dylib/dll
    # resides and this seems like the only portable way at the moment.
    lib_postfix = ".so"
    if "darwin" in self._os:
        lib_postfix = ".dylib"
    # The search is split into /usr/lib and /usr/local for speed purposes.
    libpython = "lib" + self.python_interpreter + lib_postfix
    # BUG FIX: the original `break` only exited the inner filename loop, so
    # os.walk kept scanning the whole tree after a match (and could even
    # overwrite the hit with a later one).  Stop at the first match.
    for root, _, filenames in os.walk('/usr/lib/'):
        matches = fnmatch.filter(filenames, libpython)
        if matches:
            self.python_ld_path = os.path.join(
                root, matches[0]).rsplit(libpython)[0]
            break
    if self.python_ld_path is None:
        for root, _, filenames in os.walk('/usr/local/'):
            matches = fnmatch.filter(filenames, libpython)
            if matches:
                self.python_ld_path = os.path.join(
                    root, matches[0]).rsplit(libpython)[0]
                break

    # For conda envs change python_ld_path
    if "conda" in sys.version or "Continuum" in sys.version or "Intel" in sys.version:
        self.python_ld_path = get_config_var("LIBDIR")

    # Sanity check
    if self.python_ld_path is None:
        try:
            self.python_ld_path = get_config_var("LIBDIR")
        except Exception:
            raise RuntimeError("Could not find libpython")

    # Get postfix for extensions.
    # BUG FIX: get_config_vars()['SO'] raises KeyError on Python >= 3.11
    # (the 'SO' var was removed), and slicing None with [1:] would raise
    # before the old `is None` check could ever run.  Use get_config_var,
    # falling back to the modern 'EXT_SUFFIX' name, and only strip the
    # leading dot when a value exists.
    so_suffix = get_config_var("SO") or get_config_var("EXT_SUFFIX")
    self.extension_postfix = so_suffix[1:] if so_suffix else None
    if self.extension_postfix is None:
        if "darwin" in self._os:
            self.extension_postfix = "dylib"
        else:
            self.extension_postfix = "so"
try: from numpy.distutils.misc_util import get_info from os.path import dirname WITHNUMPY = True except: WITHNUMPY = False srcs = [x for x in glob.glob("libBigWig/*.c")] srcs.append("pyBigWig.c") libs=["m", "z"] # do not link to python on mac, see https://github.com/deeptools/pyBigWig/issues/58 if 'dynamic_lookup' not in (sysconfig.get_config_var('LDSHARED') or ''): if sysconfig.get_config_vars('BLDLIBRARY') is not None: #Note the "-l" prefix! for e in sysconfig.get_config_vars('BLDLIBRARY')[0].split(): if e[0:2] == "-l": libs.append(e[2:]) elif sys.version_info[0] >= 3 and sys.version_info[1] >= 3: libs.append("python%i.%im" % (sys.version_info[0], sys.version_info[1])) else: libs.append("python%i.%i" % (sys.version_info[0], sys.version_info[1])) additional_libs = [sysconfig.get_config_var("LIBDIR"), sysconfig.get_config_var("LIBPL")] defines = [] try: foo, _ = subprocess.Popen(['curl-config', '--libs'], stdout=subprocess.PIPE).communicate() libs.append("curl")