def build(cfile, outputfilename, compile_extra, link_extra, include_dirs, libraries, library_dirs):
    """Compile *cfile* from inside its own directory, then link the
    resulting objects into a shared object at *outputfilename*."""
    cc = new_compiler(force=1, verbose=2)
    cc.customize('')

    saved_cwd = os.getcwd()
    # cd into the directory where the cfile is so distutils places the
    # intermediate objects next to the source.
    os.chdir(cfile.parent)
    try:
        compiled = cc.compile(
            [str(cfile.name)],
            include_dirs=include_dirs,
            extra_preargs=compile_extra,
        )
        obj_files = [str(cfile.parent / obj) for obj in compiled]
    finally:
        # Always restore the original working directory.
        os.chdir(saved_cwd)

    cc.link_shared_object(
        obj_files,
        str(outputfilename),
        libraries=libraries,
        extra_preargs=link_extra,
        library_dirs=library_dirs,
    )
def _get_compiler():
    """Build a compiler object configured like the one sklearn's build uses.

    Honours a compiler chosen either on the command line
    (``python setup.py build_ext --compiler=<compiler>``) or through the
    environment (``CC=<compiler> python setup.py build_ext``).
    """
    options = {
        "script_name": os.path.basename(sys.argv[0]),
        "script_args": sys.argv[1:],
        "cmdclass": {"config_cc": config_cc},
    }
    dist = Distribution(options)
    dist.parse_config_files()
    dist.parse_command_line()

    # Pick up an explicit --compiler=<name> from the parsed options.
    build_ext_opts = dist.command_options.get("build_ext")
    compiler = None
    if build_ext_opts is not None and "compiler" in build_ext_opts:
        compiler = build_ext_opts["compiler"][1]

    cc = new_compiler(compiler=compiler)
    customize_compiler(cc)
    return cc
def compile_test_program(code, extra_preargs=None, extra_postargs=None):
    """Check that some C code can be compiled and run.

    Parameters
    ----------
    code : str
        C source of a small test program.
    extra_preargs, extra_postargs : list of str, callable, or None
        Extra compiler/linker arguments.  A callable receives the compiler
        instance and returns the list of arguments.

    Returns
    -------
    list of str
        Lines printed by the test program on stdout.

    Raises
    ------
    subprocess.CalledProcessError
        If the compiled program exits with a non-zero status.
    """
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    # Fixed: the defaults used to be mutable lists ([]); use None sentinels
    # and normalise here instead.
    if extra_preargs is None:
        extra_preargs = []
    if extra_postargs is None:
        extra_postargs = []
    # extra_(pre/post)args can be a callable to make it possible to get its
    # value from the compiler
    if callable(extra_preargs):
        extra_preargs = extra_preargs(ccompiler)
    if callable(extra_postargs):
        extra_postargs = extra_postargs(ccompiler)

    start_dir = os.path.abspath(".")

    with tempfile.TemporaryDirectory() as tmp_dir:
        # The useless ``except Exception: raise`` clause was dropped; the
        # ``finally`` alone guarantees the working directory is restored.
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open("test_program.c", "w") as f:
                f.write(code)

            os.mkdir("objects")

            # Compile test program
            ccompiler.compile(
                ["test_program.c"],
                output_dir="objects",
                extra_postargs=extra_postargs,
            )

            # Link test program
            objects = glob.glob(
                os.path.join("objects", "*" + ccompiler.obj_extension)
            )
            ccompiler.link_executable(
                objects,
                "test_program",
                extra_preargs=extra_preargs,
                extra_postargs=extra_postargs,
            )

            # Run test program; raises CalledProcessError on non-zero exit.
            output = subprocess.check_output("./test_program")
            output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
        finally:
            os.chdir(start_dir)

    return output
def test_compile1(self):
    # Build the first source file and verify an object file appears.
    comp = ccompiler.new_compiler()
    saved_dir = os.getcwd()
    try:
        # Work inside the source's own directory to avoid polluting cwd.
        os.chdir(self._dir1)
        comp.compile([os.path.basename(self._src1)], output_dir=self._dir1)
        # The object suffix differs between Unix (.o) and MSVC (.obj).
        produced = (os.path.isfile(self._src1.replace('.c', '.o'))
                    or os.path.isfile(self._src1.replace('.c', '.obj')))
        assert_(produced)
    finally:
        os.chdir(saved_dir)
def test_compile1(self):
    # Compile source and link the first source
    c = ccompiler.new_compiler()
    previousDir = os.getcwd()
    try:
        # Change directory to not screw up directories
        os.chdir(self._dir1)
        c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
        # Ensure that the object exists
        assert_(os.path.isfile(self._src1.replace('.c', '.o')))
    except OSError:
        # No usable compiler on this platform: skip silently, as before.
        pass
    finally:
        # Fixed: previously the restoring chdir() lived inside the try
        # body, so any compile failure left the process stranded in
        # self._dir1.  A finally clause always restores the cwd.
        os.chdir(previousDir)
def _init_cxxcompiler(self, compiler_type):
    # Build a distutils C++ compiler and derive its scons counterpart.
    cxx = new_compiler(compiler=compiler_type,
                       verbose=self.verbose,
                       dry_run=self.dry_run,
                       force=self.force)
    if cxx is not None:
        cxx.customize(self.distribution, need_cxx=1)
        cxx.customize_cmd(self)
        self.cxxcompiler = cxx.cxx_compiler()
        try:
            # Probe for the C++ tool; failure disables C++ support.
            get_cxx_tool_path(self.cxxcompiler)
        except DistutilsSetupError:
            self.cxxcompiler = None

        if self.cxxcompiler:
            self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler)
            self.scons_cxxcompiler_path = protect_path(
                get_cxx_tool_path(self.cxxcompiler))
def test_compile2(self):
    # Build the second source, forwarding the extra link arguments
    # collected from the system-info configuration block.
    info = self.c_temp2
    comp = ccompiler.new_compiler()
    link_args = info.calc_extra_info()['extra_link_args']
    saved_dir = os.getcwd()
    try:
        # Work inside the source's own directory to avoid polluting cwd.
        os.chdir(self._dir2)
        comp.compile([os.path.basename(self._src2)],
                     output_dir=self._dir2,
                     extra_postargs=link_args)
        # Verify the compiler actually produced an object file.
        assert_(os.path.isfile(self._src2.replace('.c', '.o')))
    finally:
        os.chdir(saved_dir)
def have_compiler():
    """ Return True if there appears to be an executable compiler
    """
    compiler = ccompiler.new_compiler()
    try:
        cmd = compiler.compiler  # Unix compilers
    except AttributeError:
        try:
            compiler.initialize()  # MSVC is different
        except DistutilsError:
            return False
        cmd = [compiler.cc]
    try:
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        # Fixed: close the pipes and reap the child so the probe leaks
        # neither file descriptors nor zombie processes.
        p.stdout.close()
        p.stderr.close()
        p.wait()
    except OSError:
        return False
    return True
def _init_cxxcompiler(self, compiler_type):
    # Create a distutils C++ compiler for ``compiler_type`` and, when one is
    # usable, record the matching scons tool name and its (protected) path.
    cxxcompiler = new_compiler(compiler=compiler_type,
                               verbose=self.verbose,
                               dry_run=self.dry_run,
                               force=self.force)
    if cxxcompiler is not None:
        cxxcompiler.customize(self.distribution, need_cxx=1)
        cxxcompiler.customize_cmd(self)
        self.cxxcompiler = cxxcompiler.cxx_compiler()
        try:
            # Probe that the C++ tool can actually be located.
            get_cxx_tool_path(self.cxxcompiler)
        except DistutilsSetupError:
            # No usable C++ tool: disable C++ support for this build.
            self.cxxcompiler = None

        if self.cxxcompiler:
            self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler)
            self.scons_cxxcompiler_path = protect_path(
                get_cxx_tool_path(self.cxxcompiler))
# NOTE: Python 2 code (``except E, e:`` syntax) from the old numpy scons command.
def finalize_options(self):
    # Finish option setup: collect scons scripts/hooks from the distribution
    # and resolve the C compiler so scons can find the same tool distutils uses.
    old_build_ext.finalize_options(self)
    if self.distribution.has_scons_scripts():
        self.sconscripts = self.distribution.get_scons_scripts()
        self.pre_hooks = self.distribution.get_scons_pre_hooks()
        self.post_hooks = self.distribution.get_scons_post_hooks()
        self.pkg_names = self.distribution.get_scons_parent_names()
    else:
        self.sconscripts = []
        self.pre_hooks = []
        self.post_hooks = []
        self.pkg_names = []

    # Try to get the same compiler than the ones used by distutils: this is
    # non trivial because distutils and scons have totally different
    # conventions on this one (distutils uses PATH from user's environment,
    # whereas scons uses standard locations). The way we do it is once we
    # got the c compiler used, we use numpy.distutils function to get the
    # full path, and add the path to the env['PATH'] variable in env
    # instance (this is done in numpy.distutils.scons module).

    # XXX: The logic to bypass distutils is ... not so logic.
    compiler_type = self.compiler
    if compiler_type == 'msvc':
        # MSVC may fail below; in that case we fall back on the raw type name.
        self._bypass_distutils_cc = True
    from numpy.distutils.ccompiler import new_compiler
    try:
        distutils_compiler = new_compiler(compiler=compiler_type,
                                          verbose=self.verbose,
                                          dry_run=self.dry_run,
                                          force=self.force)
        distutils_compiler.customize(self.distribution)
        # This initialization seems necessary, sometimes, for find_executable to work...
        if hasattr(distutils_compiler, 'initialize'):
            distutils_compiler.initialize()
        self.scons_compiler = dist2sconscc(distutils_compiler)
        self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
    except DistutilsPlatformError, e:
        # Only swallow the error when we deliberately bypass distutils (msvc).
        if not self._bypass_distutils_cc:
            raise e
        else:
            self.scons_compiler = compiler_type
# NOTE: Python 2 code (``except E, e:`` syntax).
def _init_ccompiler(self, compiler_type):
    # Resolve the C compiler through distutils and translate it into the
    # scons tool name + full path scons needs.
    # XXX: The logic to bypass distutils is ... not so logic.
    if compiler_type == "msvc":
        # MSVC detection may fail; in that case fall back on the type name.
        self._bypass_distutils_cc = True
    try:
        distutils_compiler = new_compiler(
            compiler=compiler_type,
            verbose=self.verbose,
            dry_run=self.dry_run,
            force=self.force
        )
        distutils_compiler.customize(self.distribution)
        # This initialization seems necessary, sometimes, for find_executable to work...
        if hasattr(distutils_compiler, "initialize"):
            distutils_compiler.initialize()
        self.scons_compiler = dist2sconscc(distutils_compiler)
        self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
    except DistutilsPlatformError, e:
        # Only swallow the error when we deliberately bypass distutils (msvc).
        if not self._bypass_distutils_cc:
            raise e
        else:
            self.scons_compiler = compiler_type
# NOTE: Python 2 code; near-duplicate of the finalize_options variant above.
def finalize_options(self):
    # Finish option setup: collect scons scripts/hooks from the distribution
    # and resolve the C compiler so scons can find the same tool distutils uses.
    old_build_ext.finalize_options(self)
    if self.distribution.has_scons_scripts():
        self.sconscripts = self.distribution.get_scons_scripts()
        self.pre_hooks = self.distribution.get_scons_pre_hooks()
        self.post_hooks = self.distribution.get_scons_post_hooks()
        self.pkg_names = self.distribution.get_scons_parent_names()
    else:
        self.sconscripts = []
        self.pre_hooks = []
        self.post_hooks = []
        self.pkg_names = []

    # Try to get the same compiler than the ones used by distutils: this is
    # non trivial because distutils and scons have totally different
    # conventions on this one (distutils uses PATH from user's environment,
    # whereas scons uses standard locations). The way we do it is once we
    # got the c compiler used, we use numpy.distutils function to get the
    # full path, and add the path to the env['PATH'] variable in env
    # instance (this is done in numpy.distutils.scons module).

    # XXX: The logic to bypass distutils is ... not so logic.
    compiler_type = self.compiler
    if compiler_type == 'msvc':
        # MSVC may fail below; in that case we fall back on the raw type name.
        self._bypass_distutils_cc = True
    from numpy.distutils.ccompiler import new_compiler
    try:
        distutils_compiler = new_compiler(compiler=compiler_type,
                                          verbose=self.verbose,
                                          dry_run=self.dry_run,
                                          force=self.force)
        distutils_compiler.customize(self.distribution)
        # This initialization seems necessary, sometimes, for find_executable to work...
        if hasattr(distutils_compiler, 'initialize'):
            distutils_compiler.initialize()
        self.scons_compiler = dist2sconscc(distutils_compiler)
        self.scons_compiler_path = protect_path(
            get_tool_path(distutils_compiler))
    except DistutilsPlatformError, e:
        # Only swallow the error when we deliberately bypass distutils (msvc).
        if not self._bypass_distutils_cc:
            raise e
        else:
            self.scons_compiler = compiler_type
# NOTE: Python 2 code; near-duplicate of the _init_ccompiler variant above.
def _init_ccompiler(self, compiler_type):
    # Resolve the C compiler through distutils and translate it into the
    # scons tool name + full path scons needs.
    # XXX: The logic to bypass distutils is ... not so logic.
    if compiler_type == 'msvc':
        # MSVC detection may fail; in that case fall back on the type name.
        self._bypass_distutils_cc = True
    try:
        distutils_compiler = new_compiler(compiler=compiler_type,
                                          verbose=self.verbose,
                                          dry_run=self.dry_run,
                                          force=self.force)
        distutils_compiler.customize(self.distribution)
        # This initialization seems necessary, sometimes, for find_executable to work...
        if hasattr(distutils_compiler, 'initialize'):
            distutils_compiler.initialize()
        self.scons_compiler = dist2sconscc(distutils_compiler)
        self.scons_compiler_path = protect_path(
            get_tool_path(distutils_compiler))
    except DistutilsPlatformError, e:
        # Only swallow the error when we deliberately bypass distutils (msvc).
        if not self._bypass_distutils_cc:
            raise e
        else:
            self.scons_compiler = compiler_type
def have_compiler():
    """Report whether an executable C compiler appears to be available."""
    cc = ccompiler.new_compiler()
    cc.customize(None)
    try:
        # Unix-style compilers expose the command line directly.
        probe_cmd = cc.compiler
    except AttributeError:
        # MSVC needs an explicit initialize() before it is usable.
        try:
            if not cc.initialized:
                cc.initialize()
        except (DistutilsError, ValueError):
            return False
        probe_cmd = [cc.cc]
    try:
        # Launch the compiler, then close both pipes and reap the child
        # so the probe leaks no descriptors or zombie processes.
        proc = Popen(probe_cmd, stdout=PIPE, stderr=PIPE)
        proc.stdout.close()
        proc.stderr.close()
        proc.wait()
    except OSError:
        return False
    return True
def create_compiler_instance(dist):
    """Return a distutils compiler customized from *dist*'s build settings."""
    # The compiler name is read straight off the distribution's parsed
    # 'build_ext' options, because build_ext itself was not reliably
    # picking up the compiler setting.
    compiler_name = ''
    opts = dist.command_options.get('build_ext', None)
    if opts:
        compiler_name = opts.get('compiler', ('', ''))[1]

    # An empty name means "use the platform default" (None for distutils).
    if not compiler_name:
        compiler_name = None

    instance = new_compiler(compiler=compiler_name)
    customize_compiler(instance)
    return instance
def _get_compiler():
    """Get a compiler equivalent to the one that will be used to build sklearn

    Handles compiler specified as follows:
        - python setup.py build_ext --compiler=<compiler>
        - CC=<compiler> python setup.py build_ext
    """
    dist = Distribution({
        'script_name': os.path.basename(sys.argv[0]),
        'script_args': sys.argv[1:],
        'cmdclass': {
            'config_cc': config_cc
        }
    })
    # Fixed: without these two calls dist.command_options stays empty, so a
    # --compiler=<compiler> flag on the command line (advertised in the
    # docstring above) was silently ignored.
    dist.parse_config_files()
    dist.parse_command_line()

    cmd_opts = dist.command_options.get('build_ext')
    if cmd_opts is not None and 'compiler' in cmd_opts:
        compiler = cmd_opts['compiler'][1]
    else:
        compiler = None

    ccompiler = new_compiler(compiler=compiler)
    customize_compiler(ccompiler)
    return ccompiler
# NOTE: Python 2 code (print statements).
def run (self):
    # Configure pygist: probe the toolchain and write the results to Make.cfg,
    # skipping the whole step when a previous configuration already exists.
    fn = os.path.join(self.config_path, "Make.cfg")
    if os.path.isfile(fn) and os.path.isfile(self.config_h):
        print '*'*70
        print 'Files\n%s\n%s\n exist.' % (fn,self.config_h)
        print 'Skipping pygist configuration'\
              ' (remove %s to force reconfiguration).' % fn
        print '*'*70
        return
    from numpy.distutils.log import set_verbosity
    from numpy.distutils.ccompiler import new_compiler
    # Silence numpy.distutils logging while probing; restored at the end.
    save_verbosity = set_verbosity(-1)
    self.compiler = new_compiler(compiler=self.compiler, verbose=0)
    # Route compiler invocations through our own spawn during configuration.
    old_spawn = self.compiler.spawn
    self.compiler.spawn = self.spawn
    from distutils.sysconfig import customize_compiler
    customize_compiler(self.compiler)
    self.configfile = open(fn,'w')
    self.configfile.write('# Make.cfg from setup.py script ' + time.ctime() + '\n')
    if sys.platform != 'win32':
        # Record the build host's uname as a comment line.
        self.configfile.write('#')
        for item in os.uname():
            self.configfile.write(' '+item)
        self.configfile.write('\n')
    self.config_toplevel()
    self.config_unix()
    self.config_x11()
    print 'wrote',fn
    self.configfile.close()
    set_verbosity(save_verbosity)
    # Restore the compiler's original spawn hook.
    self.compiler.spawn = old_spawn
def create_compiler_instance(dist):
    """Create and customize a compiler from the distribution's settings."""
    # build_ext did not reliably expose the compiler choice, so pull it
    # manually from the parsed 'build_ext' command options instead.
    build_ext_opts = dist.command_options.get('build_ext', None)
    name = build_ext_opts.get('compiler', ('', ''))[1] if build_ext_opts else ''

    # distutils expects None (not '') for "use the platform default".
    if not name:
        name = None

    compiler = new_compiler(compiler=name)
    customize_compiler(compiler)
    return compiler
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run"""
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if os.getenv('SKLEARN_NO_OPENMP'):
        # Build explicitly without OpenMP support
        return False

    start_dir = os.path.abspath('.')

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            # Build and run everything inside a throw-away directory.
            os.chdir(tmp_dir)

            # Write test program
            with open('test_openmp.c', 'w') as f:
                f.write(CCODE)

            os.mkdir('objects')

            # Compile, test program
            openmp_flags = get_openmp_flag(ccompiler)
            ccompiler.compile(['test_openmp.c'], output_dir='objects',
                              extra_postargs=openmp_flags)

            # Link test program
            # Forward any user-supplied LDFLAGS to the link step.
            extra_preargs = os.getenv('LDFLAGS', None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join('objects', '*' + ccompiler.obj_extension))
            ccompiler.link_executable(objects, 'test_openmp',
                                      extra_preargs=extra_preargs,
                                      extra_postargs=openmp_flags)

            # Run test program
            # will raise a CalledProcessError if return code was non-zero
            output = subprocess.check_output('./test_openmp')
            output = output.decode(sys.stdout.encoding or 'utf-8').splitlines()

            # Check test program output: OpenMP works when the program
            # reports as many output lines as threads.
            if 'nthreads=' in output[0]:
                nthreads = int(output[0].strip().split('=')[1])
                openmp_supported = (len(output) == nthreads)
            else:
                openmp_supported = False

        except (CompileError, LinkError, subprocess.CalledProcessError):
            # Any build or run failure simply means "no OpenMP".
            openmp_supported = False

        finally:
            # Always restore the original working directory.
            os.chdir(start_dir)

    err_message = textwrap.dedent(
        """
        ***

        It seems that scikit-learn cannot be built with OpenMP support.

        - Make sure you have followed the installation instructions:

            https://scikit-learn.org/dev/developers/advanced_installation.html

        - If your compiler supports OpenMP but the build still fails, please
          submit a bug report at:

            https://github.com/scikit-learn/scikit-learn/issues

        - If you want to build scikit-learn without OpenMP support, you can set
          the environment variable SKLEARN_NO_OPENMP and rerun the build
          command. Note however that some estimators will run in sequential
          mode and their `n_jobs` parameter will have no effect anymore.

        ***
        """)

    if not openmp_supported:
        raise CompileError(err_message)

    return True
class TestSystemInfoReading:
    """Exercise system_info parsing of a generated site.cfg: library/dir
    lookup, duplicate-option detection, compilation, and section overrides."""

    def setup(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, "foo.c")
        self._lib1 = os.path.join(self._dir1, "libfoo.so")
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, "bar.c")
        self._lib2 = os.path.join(self._dir2, "libbar.so")
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(
            **{
                "dir1": self._dir1,
                "lib1": self._lib1,
                "dir2": self._dir2,
                "lib2": self._lib2,
                "pathsep": os.pathsep,
                "lib2_escaped": _shell_utils.NativeParser.join([self._lib2]),
            })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, "w") as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, "w") as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, "w") as fd:
            fd.write(fakelib_c_text)

        # We create all class-instances
        def site_and_parse(c, site_cfg):
            # Point the instance at our temporary site.cfg and parse it.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class("default"), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class("temp1"), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class("temp2"), self._sitecfg)
        self.c_dup_options = site_and_parse(get_class("duplicate_options"),
                                            self._sitecfg)

    def teardown(self):
        # Do each removal separately so one failure does not skip the rest.
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        assert_equal(
            extra["extra_compile_args"],
            ["-I/fake/directory", "-I/path with/spaces", "-Os"],
        )

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key="rpath"), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra["extra_link_args"], ["-Wl,-rpath=" + self._lib2])

    def test_duplicate_options(self):
        # Ensure that duplicates are raising an AliasedOptionError
        tsi = self.c_dup_options
        assert_raises(AliasedOptionError, tsi.get_option_single,
                      "mylib_libs", "libraries")
        assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
        assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    def test_compile1(self):
        # Compile source and link the first source
        c = customized_ccompiler()
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists (.o on Unix, .obj with MSVC)
            assert_(
                os.path.isfile(self._src1.replace(".c", ".o")) or
                os.path.isfile(self._src1.replace(".c", ".obj")))
        finally:
            os.chdir(previousDir)

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    @pytest.mark.skipif("msvc" in repr(ccompiler.new_compiler()),
                        reason="Fails with MSVC compiler ")
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = customized_ccompiler()
        extra_link_args = tsi.calc_extra_info()["extra_link_args"]
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile(
                [os.path.basename(self._src2)],
                output_dir=self._dir2,
                extra_postargs=extra_link_args,
            )
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace(".c", ".o")))
        finally:
            os.chdir(previousDir)

    def test_overrides(self):
        # Verify how '[ALL]', '[mkl]' and '[DEFAULT]' sections interact.
        previousDir = os.getcwd()
        cfg = os.path.join(self._dir1, "site.cfg")
        shutil.copy(self._sitecfg, cfg)
        try:
            os.chdir(self._dir1)
            # Check that the '[ALL]' section does not override
            # missing values from other sections
            info = mkl_info()
            lib_dirs = info.cp["ALL"]["library_dirs"].split(os.pathsep)
            assert info.get_lib_dirs() != lib_dirs

            # But if we copy the values to a '[mkl]' section the value
            # is correct
            with open(cfg, "r") as fid:
                mkl = fid.read().replace("ALL", "mkl")
            with open(cfg, "w") as fid:
                fid.write(mkl)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs

            # Also, the values will be taken from a section named '[DEFAULT]'
            with open(cfg, "r") as fid:
                dflt = fid.read().replace("mkl", "DEFAULT")
            with open(cfg, "w") as fid:
                fid.write(dflt)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs
        finally:
            os.chdir(previousDir)
# NOTE: Python 2 code (``except E, e:`` syntax) from the old numpy scons command.
def finalize_options(self):
    # Finish option setup: collect scons scripts/hooks, then resolve the C,
    # Fortran and C++ compilers so scons can locate the same tools distutils uses.
    old_build_ext.finalize_options(self)
    if self.distribution.has_scons_scripts():
        self.sconscripts = self.distribution.get_scons_scripts()
        self.pre_hooks = self.distribution.get_scons_pre_hooks()
        self.post_hooks = self.distribution.get_scons_post_hooks()
        self.pkg_names = self.distribution.get_scons_parent_names()
    else:
        self.sconscripts = []
        self.pre_hooks = []
        self.post_hooks = []
        self.pkg_names = []

    # To avoid trouble, just don't do anything if no sconscripts are used.
    # This is useful when for example f2py uses numpy.distutils, because
    # f2py does not pass compiler information to scons command, and the
    # compilation setup below can crash in some situation.
    if len(self.sconscripts) > 0:
        # Try to get the same compiler than the ones used by distutils: this is
        # non trivial because distutils and scons have totally different
        # conventions on this one (distutils uses PATH from user's environment,
        # whereas scons uses standard locations). The way we do it is once we
        # got the c compiler used, we use numpy.distutils function to get the
        # full path, and add the path to the env['PATH'] variable in env
        # instance (this is done in numpy.distutils.scons module).

        # XXX: The logic to bypass distutils is ... not so logic.
        compiler_type = self.compiler
        if compiler_type == 'msvc':
            # MSVC may fail below; fall back on the raw type name then.
            self._bypass_distutils_cc = True
        from numpy.distutils.ccompiler import new_compiler
        try:
            distutils_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            distutils_compiler.customize(self.distribution)
            # This initialization seems necessary, sometimes, for find_executable to work...
            if hasattr(distutils_compiler, 'initialize'):
                distutils_compiler.initialize()
            self.scons_compiler = dist2sconscc(distutils_compiler)
            self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
        except DistutilsPlatformError, e:
            # Only swallow the error when we deliberately bypass distutils.
            if not self._bypass_distutils_cc:
                raise e
            else:
                self.scons_compiler = compiler_type

        # We do the same for the fortran compiler ...
        fcompiler_type = self.fcompiler
        from numpy.distutils.fcompiler import new_fcompiler
        self.fcompiler = new_fcompiler(compiler = fcompiler_type,
                                       verbose = self.verbose,
                                       dry_run = self.dry_run,
                                       force = self.force)
        if self.fcompiler is not None:
            self.fcompiler.customize(self.distribution)

        # And the C++ compiler
        cxxcompiler = new_compiler(compiler = compiler_type,
                                   verbose = self.verbose,
                                   dry_run = self.dry_run,
                                   force = self.force)
        if cxxcompiler is not None:
            cxxcompiler.customize(self.distribution, need_cxx = 1)
            cxxcompiler.customize_cmd(self)
            self.cxxcompiler = cxxcompiler.cxx_compiler()
            try:
                # Probe that the C++ tool can actually be located.
                get_cxx_tool_path(self.cxxcompiler)
            except DistutilsSetupError:
                self.cxxcompiler = None

    if self.package_list:
        self.package_list = parse_package_list(self.package_list)
# NOTE: Python 2 code; near-duplicate of the guarded finalize_options above.
def finalize_options(self):
    # Finish option setup: collect scons scripts/hooks, then resolve the C,
    # Fortran and C++ compilers so scons can locate the same tools distutils uses.
    old_build_ext.finalize_options(self)
    if self.distribution.has_scons_scripts():
        self.sconscripts = self.distribution.get_scons_scripts()
        self.pre_hooks = self.distribution.get_scons_pre_hooks()
        self.post_hooks = self.distribution.get_scons_post_hooks()
        self.pkg_names = self.distribution.get_scons_parent_names()
    else:
        self.sconscripts = []
        self.pre_hooks = []
        self.post_hooks = []
        self.pkg_names = []

    # To avoid trouble, just don't do anything if no sconscripts are used.
    # This is useful when for example f2py uses numpy.distutils, because
    # f2py does not pass compiler information to scons command, and the
    # compilation setup below can crash in some situation.
    if len(self.sconscripts) > 0:
        # Try to get the same compiler than the ones used by distutils: this is
        # non trivial because distutils and scons have totally different
        # conventions on this one (distutils uses PATH from user's environment,
        # whereas scons uses standard locations). The way we do it is once we
        # got the c compiler used, we use numpy.distutils function to get the
        # full path, and add the path to the env['PATH'] variable in env
        # instance (this is done in numpy.distutils.scons module).

        # XXX: The logic to bypass distutils is ... not so logic.
        compiler_type = self.compiler
        if compiler_type == 'msvc':
            # MSVC may fail below; fall back on the raw type name then.
            self._bypass_distutils_cc = True
        from numpy.distutils.ccompiler import new_compiler
        try:
            distutils_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            distutils_compiler.customize(self.distribution)
            # This initialization seems necessary, sometimes, for find_executable to work...
            if hasattr(distutils_compiler, 'initialize'):
                distutils_compiler.initialize()
            self.scons_compiler = dist2sconscc(distutils_compiler)
            self.scons_compiler_path = protect_path(
                get_tool_path(distutils_compiler))
        except DistutilsPlatformError, e:
            # Only swallow the error when we deliberately bypass distutils.
            if not self._bypass_distutils_cc:
                raise e
            else:
                self.scons_compiler = compiler_type

        # We do the same for the fortran compiler ...
        fcompiler_type = self.fcompiler
        from numpy.distutils.fcompiler import new_fcompiler
        self.fcompiler = new_fcompiler(compiler=fcompiler_type,
                                       verbose=self.verbose,
                                       dry_run=self.dry_run,
                                       force=self.force)
        if self.fcompiler is not None:
            self.fcompiler.customize(self.distribution)

        # And the C++ compiler
        cxxcompiler = new_compiler(compiler=compiler_type,
                                   verbose=self.verbose,
                                   dry_run=self.dry_run,
                                   force=self.force)
        if cxxcompiler is not None:
            cxxcompiler.customize(self.distribution, need_cxx=1)
            cxxcompiler.customize_cmd(self)
            self.cxxcompiler = cxxcompiler.cxx_compiler()
            try:
                # Probe that the C++ tool can actually be located.
                get_cxx_tool_path(self.cxxcompiler)
            except DistutilsSetupError:
                self.cxxcompiler = None

    if self.package_list:
        self.package_list = parse_package_list(self.package_list)
sources=['fastoverlap/f90/fastbulk.f90'], extra_compile_args=extra_compile_args, **merge_libs(fftw, lapack)) fastcluster_ext = Extension(name='fastoverlap.f90.fastclusters', sources=['fastoverlap/f90/fastclusters.f90'], extra_compile_args=extra_compile_args, **merge_libs(fftw, lapack)) bnb_ext = Extension(name='fastoverlap.f90.libbnb', sources=['fastoverlap/f90/bnbalign.f90'], extra_compile_args=extra_compile_args, **merge_libs(lapack, queue)) if __name__ == "__main__": # compiling static fortran priority queue library fcompiler = new_fcompiler(compiler='gfortran') ccompiler = new_compiler() fcompiler.customize() queue_objs = fcompiler.compile( sources=["fastoverlap/f90/priorityqueue.f90"], output_dir='build', extra_preargs=['-c', '-fPIC']) ccompiler.create_static_lib(queue_objs, "queue", output_dir='build', debug=1) setup(name='fastoverlap', version='0.1', description=('Algorithms for fast alignment of atomic' 'structures in finite and periodic systems'), url='https://github.com/matthewghgriffiths/fastoverlap',
# NOTE: Python 2 code (``except E, e:`` syntax).
class scons(old_build_ext):
    # distutils command that delegates extension building to scons.
    # XXX: add an option to the scons command for configuration (auto/force/cache).
    description = "Scons builder"
    user_options = old_build_ext.user_options + \
        [('jobs=', None,
          "specify number of worker threads when executing scons"),
         ('scons-tool-path=', None, 'specify additional path '\
          '(absolute) to look for scons tools'),
         ('silent=', None, 'specify whether scons output should less verbose'\
          '(1), silent (2), super silent (3) or not (0, default)'),
         ('package-list=', None, 'If specified, only run scons on the given '\
          'packages (example: --package-list=scipy.cluster). If empty, '\
          'no package is built')]

    def initialize_options(self):
        # Set every option to its neutral default before parsing.
        old_build_ext.initialize_options(self)
        self.jobs = None
        self.silent = 0
        self.scons_tool_path = ''
        # If true, we bypass distutils to find the c compiler altogether. This
        # is to be used in desperate cases (like incompatible visual studio
        # version).
        self._bypass_distutils_cc = False
        self.scons_compiler = None
        self.scons_compiler_path = None
        self.scons_fcompiler = None
        self.package_list = None

    def finalize_options(self):
        # Finish option setup: collect scons scripts/hooks from the
        # distribution and resolve the C, Fortran and C++ compilers.
        old_build_ext.finalize_options(self)
        if self.distribution.has_scons_scripts():
            self.sconscripts = self.distribution.get_scons_scripts()
            self.pre_hooks = self.distribution.get_scons_pre_hooks()
            self.post_hooks = self.distribution.get_scons_post_hooks()
            self.pkg_names = self.distribution.get_scons_parent_names()
        else:
            self.sconscripts = []
            self.pre_hooks = []
            self.post_hooks = []
            self.pkg_names = []

        # Try to get the same compiler than the ones used by distutils: this is
        # non trivial because distutils and scons have totally different
        # conventions on this one (distutils uses PATH from user's environment,
        # whereas scons uses standard locations). The way we do it is once we
        # got the c compiler used, we use numpy.distutils function to get the
        # full path, and add the path to the env['PATH'] variable in env
        # instance (this is done in numpy.distutils.scons module).

        # XXX: The logic to bypass distutils is ... not so logic.
        compiler_type = self.compiler
        if compiler_type == 'msvc':
            # MSVC may fail below; fall back on the raw type name then.
            self._bypass_distutils_cc = True
        from numpy.distutils.ccompiler import new_compiler
        try:
            distutils_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            distutils_compiler.customize(self.distribution)
            # This initialization seems necessary, sometimes, for find_executable to work...
            if hasattr(distutils_compiler, 'initialize'):
                distutils_compiler.initialize()
            self.scons_compiler = dist2sconscc(distutils_compiler)
            self.scons_compiler_path = protect_path(
                get_tool_path(distutils_compiler))
        except DistutilsPlatformError, e:
            # Only swallow the error when we deliberately bypass distutils.
            if not self._bypass_distutils_cc:
                raise e
            else:
                self.scons_compiler = compiler_type

        # We do the same for the fortran compiler ...
        fcompiler_type = self.fcompiler
        from numpy.distutils.fcompiler import new_fcompiler
        self.fcompiler = new_fcompiler(compiler=fcompiler_type,
                                       verbose=self.verbose,
                                       dry_run=self.dry_run,
                                       force=self.force)
        if self.fcompiler is not None:
            self.fcompiler.customize(self.distribution)

        # And the C++ compiler
        cxxcompiler = new_compiler(compiler=compiler_type,
                                   verbose=self.verbose,
                                   dry_run=self.dry_run,
                                   force=self.force)
        if cxxcompiler is not None:
            cxxcompiler.customize(self.distribution, need_cxx=1)
            cxxcompiler.customize_cmd(self)
            self.cxxcompiler = cxxcompiler.cxx_compiler()
            try:
                # Probe that the C++ tool can actually be located.
                get_cxx_tool_path(self.cxxcompiler)
            except DistutilsSetupError:
                self.cxxcompiler = None

        if self.package_list:
            self.package_list = parse_package_list(self.package_list)
([('MS_WIN64', 1)] if isWindows() and BITS == 64 else []), libraries=[] if isWindows() else ['m']) extensions = [grib_ext] # build native executables - have to get hands a little dirty grib_sources = [path.join(here, *x) for x in [ ('src', 'wgrib.c'), ]] grib_exe = 'wgrib' if 'build_ext' in sys.argv: try: from numpy.distutils import ccompiler, fcompiler, log # get compilers cc = ccompiler.new_compiler() log.set_verbosity(1) # show compilation commands # build sources print('\nBuilding wgrib...') if not isWindows(): # clunky hack to force position independent code on *nix systems for var in ['CFLAGS', 'FFLAGS', 'LDFLAGS']: flags = os.environ.get(var, '-fPIC') flags += ' -fPIC' if '-fPIC' not in flags else '' os.environ[var] = flags try: grib_objs = cc.compile(list(map(fix_path, grib_sources)), output_dir=gettempdir()) cc.link_executable(grib_objs,
class TestSystemInfoReading:
    # Tests for numpy.distutils.system_info configuration parsing: builds a
    # throwaway site.cfg pointing at two temporary source/library pairs and
    # checks that each *_info class reads the expected values back.
    # NOTE(review): `get_class`, `simple_site`, `fakelib_c_text`, `mkl_info`,
    # `customized_ccompiler`, `HAVE_COMPILER` and `_shell_utils` are assumed
    # to come from the enclosing module — confirm against module top.

    def setup(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, 'foo.c')
        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, 'bar.c')
        self._lib2 = os.path.join(self._dir2, 'libbar.so')
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(**{
            'dir1': self._dir1,
            'lib1': self._lib1,
            'dir2': self._dir2,
            'lib2': self._lib2,
            'pathsep': os.pathsep,
            'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
        })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, 'w') as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, 'w') as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, 'w') as fd:
            fd.write(fakelib_c_text)

        # We create all class-instances
        def site_and_parse(c, site_cfg):
            # Restrict the system_info instance to our site.cfg only.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class('default'), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
        self.c_dup_options = site_and_parse(get_class('duplicate_options'),
                                            self._sitecfg)

    def teardown(self):
        # Do each removal separately; a failure in one must not stop the rest.
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_compile_args'],
                     ['-I/fake/directory', '-I/path with/spaces', '-Os'])

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])

    def test_duplicate_options(self):
        # Ensure that duplicates are raising an AliasedOptionError
        tsi = self.c_dup_options
        assert_raises(AliasedOptionError, tsi.get_option_single,
                      "mylib_libs", "libraries")
        assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
        assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    def test_compile1(self):
        # Compile source and link the first source
        c = customized_ccompiler()
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
                    os.path.isfile(self._src1.replace('.c', '.obj')))
        finally:
            os.chdir(previousDir)

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
                        reason="Fails with MSVC compiler ")
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = customized_ccompiler()
        extra_link_args = tsi.calc_extra_info()['extra_link_args']
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
                      extra_postargs=extra_link_args)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace('.c', '.o')))
        finally:
            os.chdir(previousDir)

    # Evaluated at class-definition time; drives the xfail below when numpy
    # itself is linked against MKL.
    HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])

    @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
                                        "numpy is built with MKL support"))
    def test_overrides(self):
        previousDir = os.getcwd()
        cfg = os.path.join(self._dir1, 'site.cfg')
        shutil.copy(self._sitecfg, cfg)
        try:
            os.chdir(self._dir1)
            # Check that the '[ALL]' section does not override
            # missing values from other sections
            info = mkl_info()
            lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
            assert info.get_lib_dirs() != lib_dirs
            # But if we copy the values to a '[mkl]' section the value
            # is correct
            with open(cfg, 'r') as fid:
                mkl = fid.read().replace('[ALL]', '[mkl]', 1)
            with open(cfg, 'w') as fid:
                fid.write(mkl)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs
            # Also, the values will be taken from a section named '[DEFAULT]'
            with open(cfg, 'r') as fid:
                dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1)
            with open(cfg, 'w') as fid:
                fid.write(dflt)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs
        finally:
            os.chdir(previousDir)
def run(self):
    """Build all extension modules.

    Runs `build_src` (and `build_clib` when the distribution has C
    libraries), initializes C/C++/Fortran compilers as needed, folds
    build_clib library info into each extension, then dispatches to
    ``self.build_extensions()``.

    Fix: the log.warn call used Python 2 backtick-repr syntax
    (`` `x` ``), which is a SyntaxError on Python 3; replaced with
    ``repr(x)``.
    """
    if not self.extensions:
        return

    # Make sure that extension sources are complete.
    self.run_command('build_src')

    if self.distribution.has_c_libraries():
        self.run_command('build_clib')
        build_clib = self.get_finalized_command('build_clib')
        self.library_dirs.append(build_clib.build_clib)
        ## JRK addition
        for ext in self.extensions:
            ext.include_dirs.append(build_clib.build_clib)
        ### end addition
    else:
        build_clib = None

    # Not including C libraries to the list of
    # extension libraries automatically to prevent
    # bogus linking commands. Extensions must
    # explicitly specify the C libraries that they use.

    from distutils.ccompiler import new_compiler
    from numpy.distutils.fcompiler import new_fcompiler

    compiler_type = self.compiler
    # Initialize C compiler:
    self.compiler = new_compiler(compiler=compiler_type,
                                 verbose=self.verbose,
                                 dry_run=self.dry_run,
                                 force=self.force)
    self.compiler.customize(self.distribution)
    self.compiler.customize_cmd(self)
    self.compiler.show_customization()

    # Create mapping of libraries built by build_clib:
    clibs = {}
    if build_clib is not None:
        for libname, build_info in build_clib.libraries or []:
            if libname in clibs and clibs[libname] != build_info:
                log.warn('library %r defined more than once,'
                         ' overwriting build_info\n%s... \nwith\n%s...'
                         % (libname, repr(clibs[libname])[:300],
                            repr(build_info)[:300]))
            clibs[libname] = build_info
    # .. and distribution libraries:
    for libname, build_info in self.distribution.libraries or []:
        if libname in clibs:
            # build_clib libraries have a precedence before distribution ones
            continue
        clibs[libname] = build_info

    # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
    # Update extension libraries, library_dirs, and macros.
    all_languages = set()
    for ext in self.extensions:
        ext_languages = set()
        c_libs = []
        c_lib_dirs = []
        macros = []
        for libname in ext.libraries:
            if libname in clibs:
                binfo = clibs[libname]
                c_libs += binfo.get('libraries', [])
                c_lib_dirs += binfo.get('library_dirs', [])
                for m in binfo.get('macros', []):
                    if m not in macros:
                        macros.append(m)
            for l in clibs.get(libname, {}).get('source_languages', []):
                ext_languages.add(l)
        if c_libs:
            new_c_libs = ext.libraries + c_libs
            log.info('updating extension %r libraries from %r to %r'
                     % (ext.name, ext.libraries, new_c_libs))
            ext.libraries = new_c_libs
            ext.library_dirs = ext.library_dirs + c_lib_dirs
        if macros:
            log.info('extending extension %r defined_macros with %r'
                     % (ext.name, macros))
            ext.define_macros = ext.define_macros + macros

        # determine extension languages
        if has_f_sources(ext.sources):
            ext_languages.add('f77')
        if has_cxx_sources(ext.sources):
            ext_languages.add('c++')
        l = ext.language or self.compiler.detect_language(ext.sources)
        if l:
            ext_languages.add(l)
        # reset language attribute for choosing proper linker
        if 'c++' in ext_languages:
            ext_language = 'c++'
        elif 'f90' in ext_languages:
            ext_language = 'f90'
        elif 'f77' in ext_languages:
            ext_language = 'f77'
        else:
            ext_language = 'c'  # default
        if l and l != ext_language and ext.language:
            log.warn('resetting extension %r language from %r to %r.'
                     % (ext.name, l, ext_language))
        ext.language = ext_language
        # global language
        all_languages.update(ext_languages)

    need_f90_compiler = 'f90' in all_languages
    need_f77_compiler = 'f77' in all_languages
    need_cxx_compiler = 'c++' in all_languages

    # Initialize C++ compiler:
    if need_cxx_compiler:
        self._cxx_compiler = new_compiler(compiler=compiler_type,
                                          verbose=self.verbose,
                                          dry_run=self.dry_run,
                                          force=self.force)
        compiler = self._cxx_compiler
        compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
        compiler.customize_cmd(self)
        compiler.show_customization()
        self._cxx_compiler = compiler.cxx_compiler()
    else:
        self._cxx_compiler = None

    # Initialize Fortran 77 compiler:
    if need_f77_compiler:
        ctype = self.fcompiler
        self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
                                           verbose=self.verbose,
                                           dry_run=self.dry_run,
                                           force=self.force,
                                           requiref90=False,
                                           c_compiler=self.compiler)
        fcompiler = self._f77_compiler
        if fcompiler:
            ctype = fcompiler.compiler_type
            fcompiler.customize(self.distribution)
        if fcompiler and fcompiler.get_version():
            fcompiler.customize_cmd(self)
            fcompiler.show_customization()
        else:
            self.warn('f77_compiler=%s is not available.' % (ctype))
            self._f77_compiler = None
    else:
        self._f77_compiler = None

    # Initialize Fortran 90 compiler:
    if need_f90_compiler:
        ctype = self.fcompiler
        self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
                                           verbose=self.verbose,
                                           dry_run=self.dry_run,
                                           force=self.force,
                                           requiref90=True,
                                           c_compiler=self.compiler)
        fcompiler = self._f90_compiler
        if fcompiler:
            ctype = fcompiler.compiler_type
            fcompiler.customize(self.distribution)
        if fcompiler and fcompiler.get_version():
            fcompiler.customize_cmd(self)
            fcompiler.show_customization()
        else:
            self.warn('f90_compiler=%s is not available.' % (ctype))
            self._f90_compiler = None
    else:
        self._f90_compiler = None

    # Build extensions
    self.build_extensions()
def setup_package():
    """Configure and run ``setup()`` for refnx.

    Rewrites the version file, optionally builds the Cython/C extension
    modules (when ``USE_CYTHON`` and numpy are available), and falls back
    to a pure-python install if extension compilation is impossible.

    Fixes: the bare ``except:`` around ``import numpy`` now catches only
    ``ImportError`` (a bare except also swallows KeyboardInterrupt /
    SystemExit), and the unbalanced parenthesis in the fallback warning
    message text is closed.
    """
    # Rewrite the version file every time
    write_version_py()

    info['version'] = get_version_info()[0]
    print(info['version'])

    if USE_CYTHON:
        # Obtain the numpy include directory. This logic works across numpy
        # versions.
        ext_modules = []
        HAS_NUMPY = True
        try:
            import numpy as np
        except ImportError:
            # numpy missing: request it at build time and skip extensions.
            info['setup_requires'] = ['numpy']
            HAS_NUMPY = False

        if HAS_NUMPY:
            try:
                numpy_include = np.get_include()
            except AttributeError:
                # very old numpy spelled the accessor differently
                numpy_include = np.get_numpy_include()

            _cevent = Extension(
                name='refnx.reduce._cevent',
                sources=['src/_cevent.pyx'],
                include_dirs=[numpy_include],
                language='c++',
                # libraries=
                # extra_compile_args = "...".split(),
            )
            ext_modules.append(_cevent)

            _cutil = Extension(
                name='refnx._lib._cutil',
                sources=['src/_cutil.pyx'],
                include_dirs=[numpy_include],
                language='c',
                # libraries=
                # extra_compile_args = "...".split(),
            )
            ext_modules.append(_cutil)

            # creflect extension module
            # Compile reflectivity calculator to object with C compiler
            # first.
            # It's not possible to do this in an Extension object because
            # the `-std=c++11` compile arg and C99 C code are incompatible
            # (at least on Darwin).
            from numpy.distutils.ccompiler import new_compiler
            from distutils.sysconfig import customize_compiler
            ccompiler = new_compiler()
            customize_compiler(ccompiler)
            ccompiler.verbose = True

            extra_preargs = [
                '-O2',
            ]
            if sys.platform == 'win32':
                # use the C++ code on Windows. The C++ code uses the
                # std::complex<double> object for its arithmetic.
                f = ['src/refcalc.cpp']
            else:
                # and C code on other machines. The C code uses C99 complex
                # arithmetic which is 10-20% faster.
                # the CMPLX macro was only standardised in C11
                extra_preargs.extend([
                    '-std=c11',
                    '-funsafe-math-optimizations',
                    '-ffinite-math-only',
                ])
                f = ['src/refcalc.c']

            refcalc_obj = ccompiler.compile(f, extra_preargs=extra_preargs)
            print(refcalc_obj)

            _creflect = Extension(
                name='refnx.reflect._creflect',
                sources=['src/_creflect.pyx', 'src/refcaller.cpp'],
                include_dirs=[numpy_include],
                language='c++',
                extra_compile_args=['-std=c++11'],
                extra_objects=refcalc_obj,
            )
            ext_modules.append(_creflect)

            # if we have openmp use pure cython version
            # openmp should be present on windows, linux
            #
            # However, it's not present in Apple Clang. Therefore one has to
            # jump through hoops to enable it.
            # It's probably easier to install OpenMP on macOS via homebrew.
            # However, it's fairly simple to build the OpenMP library, and
            # installing it into PREFIX=/usr/local
            #
            # https://gist.github.com/andyfaff/084005bee32aee83d6b59e843278ab3e
            #
            # Instructions for macOS:
            #
            # brew install libomp
            # export CC=clang
            # export CXX=clang++
            # export CXXFLAGS="$CXXFLAGS -Xpreprocessor -fopenmp"
            # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
            # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
            # export LDFLAGS="$LDFLAGS -L/usr/local/opt/libomp/lib -lomp"
            # export DYLD_LIBRARY_PATH=/usr/local/opt/libomp/lib
            if HAS_OPENMP:
                # cyreflect extension module
                _cyreflect = Extension(
                    name='refnx.reflect._cyreflect',
                    sources=['src/_cyreflect.pyx'],
                    include_dirs=[numpy_include],
                    language='c++',
                    extra_compile_args=[],
                    extra_link_args=[]
                    # libraries=
                    # extra_compile_args = "...".split(),
                )
                openmp_flags = get_openmp_flag(ccompiler)
                _cyreflect.extra_compile_args += openmp_flags
                _cyreflect.extra_link_args += openmp_flags
                ext_modules.append(_cyreflect)

            # specify min deployment version for macOS
            if platform == 'darwin':
                for mod in ext_modules:
                    mod.extra_compile_args.append('-mmacosx-version-min=10.9')

            info['cmdclass'].update({'build_ext': build_ext})
            info['ext_modules'] = ext_modules
            info['zip_safe'] = False

    try:
        setup(**info)
    except ValueError:
        # there probably wasn't a C-compiler (windows). Try removing extension
        # compilation
        print("")
        print("*****WARNING*****")
        print(
            "You didn't try to build the Reflectivity calculation extension."
            " Calculation will be slow, falling back to pure python."
            " To compile extension install cython. If installing in windows you"
            " should then install from Visual Studio command prompt (this makes"
            " C compiler available)")
        print("*****************")
        print("")
        info.pop('cmdclass')
        info.pop('ext_modules')
        setup(**info)
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run.

    Compiles and links the module-level ``CCODE`` probe in a temporary
    directory with the platform's OpenMP flags, runs it, and verifies that
    one output line is produced per OpenMP thread.

    Returns ``False`` when ``SKTIME_NO_OPENMP`` is set; raises
    ``CompileError`` with an explanatory message when OpenMP support is
    unavailable; returns ``True`` otherwise.

    Fix: the installation-instructions URL contained a doubled scheme
    ('https://https://...'); corrected to a single 'https://'.
    """
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if os.getenv("SKTIME_NO_OPENMP"):
        # Build explicitly without OpenMP support
        return False

    start_dir = os.path.abspath(".")

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open("test_openmp.c", "w") as f:
                f.write(CCODE)

            os.mkdir("objects")

            # Compile, test program
            openmp_flags = get_openmp_flag(ccompiler)
            ccompiler.compile(["test_openmp.c"], output_dir="objects",
                              extra_postargs=openmp_flags)

            # Link test program
            extra_preargs = os.getenv("LDFLAGS", None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join("objects", "*" + ccompiler.obj_extension))
            ccompiler.link_executable(
                objects,
                "test_openmp",
                extra_preargs=extra_preargs,
                extra_postargs=openmp_flags,
            )

            # Run test program
            output = subprocess.check_output("./test_openmp")
            output = output.decode(
                sys.stdout.encoding or "utf-8").splitlines()

            # Check test program output: the probe prints "nthreads=N" then
            # one line per thread, so line count must equal N.
            if "nthreads=" in output[0]:
                nthreads = int(output[0].strip().split("=")[1])
                openmp_supported = len(output) == nthreads
            else:
                openmp_supported = False

        except (CompileError, LinkError, subprocess.CalledProcessError):
            openmp_supported = False

        finally:
            os.chdir(start_dir)

    err_message = textwrap.dedent("""
        ***

        It seems that sktime cannot be built with OpenMP support.

        - If your compiler supports OpenMP but the build still fails, please
        submit a bug report at:
        'https://github.com/alan-turing-institute/sktime/issues'

        - If you want to build sktime without OpenMP support, you can set
        the environment variable SKTIME_NO_OPENMP and rerun the build
        command. Note however that some estimators will run in sequential
        mode and their `n_jobs` parameter will have no effect anymore.

        - See sktime advanced installation instructions for more info:
        'https://www.sktime.org/en/latest/installation.html'

        ***
        """)

    if not openmp_supported:
        raise CompileError(err_message)

    return True
class TestSystemInfoReading(TestCase):
    """Tests for system_info configuration parsing (unittest variant).

    Builds a throwaway site.cfg pointing at two temporary source/library
    pairs and checks that each *_info class reads the expected values back.

    Fix: the three bare ``except:`` clauses in ``tearDown`` are narrowed to
    ``except Exception:`` — a bare except also swallows KeyboardInterrupt
    and SystemExit, and the other variants of this class in the file
    already use ``except Exception:``.
    """

    def setUp(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, 'foo.c')
        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, 'bar.c')
        self._lib2 = os.path.join(self._dir2, 'libbar.so')
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(**{
            'dir1': self._dir1,
            'lib1': self._lib1,
            'dir2': self._dir2,
            'lib2': self._lib2,
            'pathsep': os.pathsep
        })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, 'w') as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, 'w') as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, 'w') as fd:
            fd.write(fakelib_c_text)

        # We create all class-instances
        def site_and_parse(c, site_cfg):
            # Restrict the system_info instance to our site.cfg only.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class('default'), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)

    def tearDown(self):
        # Do each removal separately; a failure in one must not stop the rest.
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_compile_args'], ['-I/fake/directory'])

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])

    @skipif(not HAVE_COMPILER)
    def test_compile1(self):
        # Compile source and link the first source
        c = ccompiler.new_compiler()
        c.customize(None)
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
                    os.path.isfile(self._src1.replace('.c', '.obj')))
        finally:
            os.chdir(previousDir)

    @skipif(not HAVE_COMPILER)
    @skipif('msvc' in repr(ccompiler.new_compiler()))
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = ccompiler.new_compiler()
        c.customize(None)
        extra_link_args = tsi.calc_extra_info()['extra_link_args']
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
                      extra_postargs=extra_link_args)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace('.c', '.o')))
        finally:
            os.chdir(previousDir)
class TestSystemInfoReading(object):
    # Tests for numpy.distutils.system_info configuration parsing: builds a
    # throwaway site.cfg pointing at two temporary source/library pairs and
    # checks that each *_info class reads the expected values back.
    # NOTE(review): `get_class`, `simple_site`, `fakelib_c_text`,
    # `customized_ccompiler`, `HAVE_COMPILER` and `_shell_utils` are assumed
    # to come from the enclosing module — confirm against module top.

    def setup(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, 'foo.c')
        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, 'bar.c')
        self._lib2 = os.path.join(self._dir2, 'libbar.so')
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(
            **{
                'dir1': self._dir1,
                'lib1': self._lib1,
                'dir2': self._dir2,
                'lib2': self._lib2,
                'pathsep': os.pathsep,
                'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
            })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, 'w') as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, 'w') as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, 'w') as fd:
            fd.write(fakelib_c_text)

        # We create all class-instances
        def site_and_parse(c, site_cfg):
            # Restrict the system_info instance to our site.cfg only.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class('default'), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
        self.c_dup_options = site_and_parse(get_class('duplicate_options'),
                                            self._sitecfg)

    def teardown(self):
        # Do each removal separately; a failure in one must not stop the rest.
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_compile_args'],
                     ['-I/fake/directory', '-I/path with/spaces', '-Os'])

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])

    def test_duplicate_options(self):
        # Ensure that duplicates are raising an AliasedOptionError
        tsi = self.c_dup_options
        assert_raises(AliasedOptionError, tsi.get_option_single,
                      "mylib_libs", "libraries")
        assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
        assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    def test_compile1(self):
        # Compile source and link the first source
        c = customized_ccompiler()
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists
            assert_(
                os.path.isfile(self._src1.replace('.c', '.o')) or
                os.path.isfile(self._src1.replace('.c', '.obj')))
        finally:
            os.chdir(previousDir)

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
                        reason="Fails with MSVC compiler ")
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = customized_ccompiler()
        extra_link_args = tsi.calc_extra_info()['extra_link_args']
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
                      extra_postargs=extra_link_args)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace('.c', '.o')))
        finally:
            os.chdir(previousDir)
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run"""
    # NOTE(review): `new_compiler`, `customize_compiler`, `get_openmp_flag`,
    # `CCODE`, `CompileError` and `LinkError` are assumed to be imported at
    # module level — confirm against the file's imports.
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if os.getenv('MREX_NO_OPENMP'):
        # Build explicitly without OpenMP support
        return False

    start_dir = os.path.abspath('.')

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open('test_openmp.c', 'w') as f:
                f.write(CCODE)

            os.mkdir('objects')

            # Compile, test program
            openmp_flags = get_openmp_flag(ccompiler)
            ccompiler.compile(['test_openmp.c'], output_dir='objects',
                              extra_postargs=openmp_flags)

            # Link test program
            # Honour user-supplied LDFLAGS (split on single spaces).
            extra_preargs = os.getenv('LDFLAGS', None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join('objects', '*' + ccompiler.obj_extension))
            ccompiler.link_executable(objects, 'test_openmp',
                                      extra_preargs=extra_preargs,
                                      extra_postargs=openmp_flags)

            # Run test program
            output = subprocess.check_output('./test_openmp')
            output = output.decode(
                sys.stdout.encoding or 'utf-8').splitlines()

            # Check test program output: the probe prints "nthreads=N" first,
            # then one line per thread, so line count must equal N.
            if 'nthreads=' in output[0]:
                nthreads = int(output[0].strip().split('=')[1])
                openmp_supported = (len(output) == nthreads)
            else:
                openmp_supported = False

        except (CompileError, LinkError, subprocess.CalledProcessError):
            openmp_supported = False

        finally:
            # Always restore the working directory before the temp dir is
            # removed.
            os.chdir(start_dir)

    err_message = textwrap.dedent("""
        ***

        It seems that scikit-learn cannot be built with OpenMP support.

        - Make sure you have followed the installation instructions:
        https://scikit-learn.org/dev/developers/advanced_installation.html

        - If your compiler supports OpenMP but the build still fails, please
        submit a bug report at:
        https://github.com/scikit-learn/scikit-learn/issues

        - If you want to build scikit-learn without OpenMP support, you can set
        the environment variable MREX_NO_OPENMP and rerun the build command.
        Note however that some estimators will run in sequential mode and their
        `n_jobs` parameter will have no effect anymore.

        ***
        """)

    if not openmp_supported:
        raise CompileError(err_message)

    return True
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run"""
    # Bail out early when the distutils/numpy machinery is unavailable.
    try:
        from numpy.distutils.ccompiler import new_compiler
        from distutils.sysconfig import customize_compiler
        from distutils.errors import CompileError, LinkError
    except ImportError:
        return False

    compiler = new_compiler()
    customize_compiler(compiler)

    original_cwd = os.path.abspath('.')
    supported = False

    with tempfile.TemporaryDirectory() as scratch:
        try:
            os.chdir(scratch)

            # Emit the OpenMP probe source and a scratch object directory.
            with open('test_openmp.c', 'w') as src:
                src.write(CCODE)
            os.mkdir('objects')

            # Compile the probe with the platform's OpenMP flags.
            flags = get_openmp_flag(compiler)
            compiler.compile(['test_openmp.c'], output_dir='objects',
                             extra_postargs=flags)

            # Honour user-supplied linker flags, if any.
            ldflags = os.getenv('LDFLAGS', None)
            preargs = ldflags.split(" ") if ldflags is not None else []

            objs = glob.glob(
                os.path.join('objects', '*' + compiler.obj_extension))
            compiler.link_executable(objs, 'test_openmp',
                                     extra_preargs=preargs,
                                     extra_postargs=flags)

            # Run the probe: it prints "nthreads=N" followed by one line
            # per OpenMP thread, so the line count must equal N.
            raw = subprocess.check_output('./test_openmp')
            lines = raw.decode(sys.stdout.encoding or 'utf-8').splitlines()

            if 'nthreads=' in lines[0]:
                nthreads = int(lines[0].strip().split('=')[1])
                supported = (len(lines) == nthreads)
        except (CompileError, LinkError, subprocess.CalledProcessError):
            supported = False
        finally:
            # Always restore the working directory before the temp dir is
            # removed.
            os.chdir(original_cwd)

    return supported