def sanity_check_step(self):
    """Custom sanity check for Boost: check for core libs + optional MPI/Python/threading libs."""
    shlib_ext = get_shared_lib_ext()
    custom_paths = {
        'files': ['lib/libboost_system.%s' % shlib_ext],
        'dirs': ['include/boost']
    }
    # MPI support produces a dedicated boost_mpi library
    if self.cfg['boost_mpi']:
        custom_paths["files"].append('lib/libboost_mpi.%s' % shlib_ext)
    if get_software_root('Python'):
        pymajorver = get_software_version('Python').split('.')[0]
        pyminorver = get_software_version('Python').split('.')[1]
        # library name suffix depends on Python major version and Boost version:
        # - Python 3.x: libboost_python3
        # - Boost >= 1.67.0 (with Python 2): libboost_python27-style suffix
        # - older Boost with Python 2: no suffix
        if int(pymajorver) >= 3:
            suffix = pymajorver
        elif LooseVersion(self.version) >= LooseVersion("1.67.0"):
            suffix = '%s%s' % (pymajorver, pyminorver)
        else:
            suffix = ''
        custom_paths["files"].append('lib/libboost_python%s.%s' % (suffix, shlib_ext))
    # multi-threaded builds carry a '-mt' suffix
    if self.cfg['boost_multi_thread']:
        custom_paths["files"].append('lib/libboost_thread-mt.%s' % shlib_ext)
    if self.cfg['boost_mpi'] and self.cfg['boost_multi_thread']:
        custom_paths["files"].append('lib/libboost_mpi-mt.%s' % shlib_ext)
    super(EB_Boost, self).sanity_check_step(custom_paths=custom_paths)
def configure_step(self): """Set some extra environment variables before configuring.""" # make sure that required dependencies are loaded deps = ['Boost', 'Python', 'SWIG'] depsdict = {} for dep in deps: deproot = get_software_root(dep) if not deproot: self.log.error("%s module not loaded?" % dep) else: depsdict.update({dep:deproot}) # SWIG version more recent than 2.0.4 have a regression # which causes problems with e.g. DOLFIN if UFC was built with it # fixed in 2.0.7? see https://bugs.launchpad.net/dolfin/+bug/996398 if LooseVersion(get_software_version('SWIG')) > '2.0.4': self.log.error("Using bad version of SWIG, expecting swig <= 2.0.4." \ " See https://bugs.launchpad.net/dolfin/+bug/996398") self.pyver = ".".join(get_software_version('Python').split(".")[:-1]) self.cfg.update('configopts', "-DBoost_DIR=%s" % depsdict['Boost']) self.cfg.update('configopts', "-DBOOST_INCLUDEDIR=%s/include" % depsdict['Boost']) self.cfg.update('configopts', "-DBoost_DEBUG=ON -DBOOST_ROOT=%s" % depsdict['Boost']) self.cfg.update('configopts', '-DUFC_ENABLE_PYTHON:BOOL=ON') self.cfg.update('configopts', '-DSWIG_FOUND:BOOL=ON') self.cfg.update('configopts', '-DPYTHON_LIBRARY=%s/lib/libpython%s.so' % (depsdict['Python'], self.pyver)) self.cfg.update('configopts', '-DPYTHON_INCLUDE_PATH=%s/include/python%s' % (depsdict['Python'], self.pyver)) super(EB_UFC, self).configure_step()
def configure_step(self):
    """Set extra configure options."""
    self.cfg.update('configopts', "--with-threads --enable-shared")

    # Need to be careful to match the unicode settings to the underlying python
    if sys.maxunicode == 1114111:
        self.cfg.update('configopts', "--enable-unicode=ucs4")
    elif sys.maxunicode == 65535:
        self.cfg.update('configopts', "--enable-unicode=ucs2")
    else:
        raise EasyBuildError("Unknown maxunicode value for your python: %d" % sys.maxunicode)

    # Modules/Setup.dist is edited in place below to enable optional stdlib modules
    modules_setup_dist = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup.dist')

    libreadline = get_software_root('libreadline')
    if libreadline:
        ncurses = get_software_root('ncurses')
        if ncurses:
            readline_libdir = get_software_libdir('libreadline')
            ncurses_libdir = get_software_libdir('ncurses')
            readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')
            ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')
            readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
            # uncomment the readline module line, pointing it at the static libs
            for line in fileinput.input(modules_setup_dist, inplace='1', backup='.readline'):
                line = re.sub(r"^#readline readline.c.*", readline, line)
                sys.stdout.write(line)
        else:
            raise EasyBuildError("Both libreadline and ncurses are required to ensure readline support")

    openssl = get_software_root('OpenSSL')
    if openssl:
        # uncomment the SSL-related lines and point $(SSL) to the OpenSSL installation
        for line in fileinput.input(modules_setup_dist, inplace='1', backup='.ssl'):
            line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
            line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
            line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
            sys.stdout.write(line)

    tcl = get_software_root('Tcl')
    tk = get_software_root('Tk')
    if tcl and tk:
        tclver = get_software_version('Tcl')
        tkver = get_software_version('Tk')
        tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])
        if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):
            raise EasyBuildError("Tcl and Tk major/minor versions don't match: %s vs %s", tclver, tkver)

        self.cfg.update('configopts', "--with-tcltk-includes='-I%s/include -I%s/include'" % (tcl, tk))

        tcl_libdir = os.path.join(tcl, get_software_libdir('Tcl'))
        tk_libdir = os.path.join(tk, get_software_libdir('Tk'))
        tcltk_libs = "-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s" % {
            'tcl_libdir': tcl_libdir,
            'tk_libdir': tk_libdir,
            'maj_min_ver': tcltk_maj_min_ver,
        }
        self.cfg.update('configopts', "--with-tcltk-libs='%s'" % tcltk_libs)

    super(EB_Python, self).configure_step()
def configure_step(self):
    """Custom configuration procedure for Bazel."""
    binutils_root = get_software_root('binutils')
    # prefer GCCcore (compiler-only module) over full GCC, for both root and version
    gcc_root = get_software_root('GCCcore') or get_software_root('GCC')
    gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')

    # only patch Bazel scripts if binutils & GCC installation prefix could be determined
    if binutils_root and gcc_root:
        res = glob.glob(os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include'))
        if res and len(res) == 1:
            gcc_lib_inc = res[0]
        else:
            raise EasyBuildError("Failed to pinpoint location of GCC include files: %s", res)

        # 'include-fixed' lives next to 'include' in the GCC installation
        gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed')
        if not os.path.exists(gcc_lib_inc_fixed):
            raise EasyBuildError("Derived directory %s does not exist", gcc_lib_inc_fixed)

        gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver)
        if not os.path.exists(gcc_cplusplus_inc):
            raise EasyBuildError("Derived directory %s does not exist", gcc_cplusplus_inc)

        # replace hardcoded paths in CROSSTOOL
        regex_subs = [
            (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),
            (r'(cxx_builtin_include_directory:.*)/usr/lib/gcc', r'\1%s' % gcc_lib_inc),
            (r'(cxx_builtin_include_directory:.*)/usr/local/include', r'\1%s' % gcc_lib_inc_fixed),
            (r'(cxx_builtin_include_directory:.*)/usr/include', r'\1%s' % gcc_cplusplus_inc),
        ]
        # also redirect hardcoded /usr/bin tool paths to the tools actually on $PATH
        for tool in ['ar', 'cpp', 'dwp', 'gcc', 'ld']:
            path = which(tool)
            if path:
                regex_subs.append((os.path.join('/usr', 'bin', tool), path))
            else:
                raise EasyBuildError("Failed to determine path to '%s'", tool)
        apply_regex_substitutions(os.path.join('tools', 'cpp', 'CROSSTOOL'), regex_subs)

        # replace hardcoded paths in (unix_)cc_configure.bzl
        regex_subs = [
            (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),
            (r'"/usr/bin', '"' + os.path.join(binutils_root, 'bin')),
        ]
        # which of these files exists depends on the Bazel version
        for conf_bzl in ['cc_configure.bzl', 'unix_cc_configure.bzl']:
            filepath = os.path.join('tools', 'cpp', conf_bzl)
            if os.path.exists(filepath):
                apply_regex_substitutions(filepath, regex_subs)
    else:
        self.log.info("Not patching Bazel build scripts, installation prefix for binutils/GCC not found")

    # enable building in parallel
    env.setvar('EXTRA_BAZEL_ARGS', '--jobs=%d' % self.cfg['parallel'])
def test_get_software_root_version_libdir(self):
    """Test get_software_X functions."""
    tmpdir = tempfile.mkdtemp()

    # (software name, expected $EBROOT*/$EBVERSION* env var suffix) pairs;
    # special characters in names are translated (e.g. '-' -> 'MIN')
    test_cases = [
        ('GCC', 'GCC'),
        ('grib_api', 'GRIB_API'),
        ('netCDF-C++', 'NETCDFMINCPLUSPLUS'),
        ('Score-P', 'SCOREMINP'),
    ]
    for (name, env_var_name) in test_cases:
        # mock stuff that get_software_X functions rely on
        root = os.path.join(tmpdir, name)
        os.makedirs(os.path.join(root, 'lib'))
        os.environ['EBROOT%s' % env_var_name] = root
        version = '0.0-%s' % root
        os.environ['EBVERSION%s' % env_var_name] = version

        self.assertEqual(get_software_root(name), root)
        self.assertEqual(get_software_version(name), version)
        self.assertEqual(get_software_libdir(name), 'lib')

        os.environ.pop('EBROOT%s' % env_var_name)
        os.environ.pop('EBVERSION%s' % env_var_name)

    # check expected result of get_software_libdir with multiple lib subdirs
    # (reuses root/env_var_name from the last test case above)
    root = os.path.join(tmpdir, name)
    os.makedirs(os.path.join(root, 'lib64'))
    os.environ['EBROOT%s' % env_var_name] = root
    self.assertErrorRegex(EasyBuildError, "Multiple library subdirectories found.*", get_software_libdir, name)
    self.assertEqual(get_software_libdir(name, only_one=False), ['lib', 'lib64'])

    # only directories containing files in specified list should be retained
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(root, 'lib64', 'foo'), 'w') as fh:
        fh.write('foo')
    self.assertEqual(get_software_libdir(name, fs=['foo']), 'lib64')

    # clean up for previous tests
    os.environ.pop('EBROOT%s' % env_var_name)

    # if root/version for specified software package can not be found, these functions should return None
    self.assertEqual(get_software_root('foo'), None)
    self.assertEqual(get_software_version('foo'), None)
    self.assertEqual(get_software_libdir('foo'), None)

    # if no library subdir is found, get_software_libdir should return None
    os.environ['EBROOTFOO'] = tmpdir
    self.assertEqual(get_software_libdir('foo'), None)
    os.environ.pop('EBROOTFOO')

    shutil.rmtree(tmpdir)
def configure_step(self):
    """Set extra configure options."""
    self.cfg.update("configopts", "--with-threads --enable-shared")

    # Modules/Setup.dist is edited in place below to enable optional stdlib modules
    modules_setup_dist = os.path.join(self.cfg["start_dir"], "Modules", "Setup.dist")

    libreadline = get_software_root("libreadline")
    if libreadline:
        ncurses = get_software_root("ncurses")
        if ncurses:
            readline_libdir = get_software_libdir("libreadline")
            ncurses_libdir = get_software_libdir("ncurses")
            readline_static_lib = os.path.join(libreadline, readline_libdir, "libreadline.a")
            ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, "libncurses.a")
            readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
            # uncomment the readline module line, pointing it at the static libs
            for line in fileinput.input(modules_setup_dist, inplace="1", backup=".readline"):
                line = re.sub(r"^#readline readline.c.*", readline, line)
                sys.stdout.write(line)
        else:
            raise EasyBuildError("Both libreadline and ncurses are required to ensure readline support")

    openssl = get_software_root("OpenSSL")
    if openssl:
        # uncomment the SSL-related lines and point $(SSL) to the OpenSSL installation
        for line in fileinput.input(modules_setup_dist, inplace="1", backup=".ssl"):
            line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
            line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
            line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
            sys.stdout.write(line)

    tcl = get_software_root("Tcl")
    tk = get_software_root("Tk")
    if tcl and tk:
        tclver = get_software_version("Tcl")
        tkver = get_software_version("Tk")
        tcltk_maj_min_ver = ".".join(tclver.split(".")[:2])
        if tcltk_maj_min_ver != ".".join(tkver.split(".")[:2]):
            raise EasyBuildError("Tcl and Tk major/minor versions don't match: %s vs %s", tclver, tkver)

        self.cfg.update("configopts", "--with-tcltk-includes='-I%s/include -I%s/include'" % (tcl, tk))

        tcl_libdir = os.path.join(tcl, get_software_libdir("Tcl"))
        tk_libdir = os.path.join(tk, get_software_libdir("Tk"))
        tcltk_libs = "-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s" % {
            "tcl_libdir": tcl_libdir,
            "tk_libdir": tk_libdir,
            "maj_min_ver": tcltk_maj_min_ver,
        }
        self.cfg.update("configopts", "--with-tcltk-libs='%s'" % tcltk_libs)

    super(EB_Python, self).configure_step()
def configure_step(self):
    """Configure OpenBabel build: out-of-tree build, tests on, GUI off, optional Python/Eigen support."""
    # Use separate build directory
    self.cfg['separate_build_dir'] = True

    # collect the extra CMake options and append them to configopts in one go;
    # tests always enabled, GUI (needs wxWidgets) always disabled
    extra_opts = ["-DENABLE_TESTS=ON ", "-DBUILD_GUI=OFF "]

    python_root = get_software_root('Python')
    if python_root:
        self.log.info("Enabling Python bindings")
        pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
        extra_opts.append("-DPYTHON_BINDINGS=ON ")
        extra_opts.append("-DPYTHON_LIBRARY=%s/lib/libpython%s.so " % (python_root, pyshortver))
        extra_opts.append("-DPYTHON_INCLUDE_DIR=%s/include/python%s " % (python_root, pyshortver))
    else:
        self.log.info("Not enabling Python bindings")

    eigen_root = get_software_root("Eigen")
    if eigen_root:
        self.log.info("Using Eigen")
        extra_opts.append("-DEIGEN3_INCLUDE_DIR='%s/include' " % eigen_root)
    else:
        self.log.info("Not using Eigen")

    self.cfg['configopts'] += ''.join(extra_opts)

    super(EB_OpenBabel, self).configure_step()
def configure_step(self):
    """Configure build:
    - set required environment variables (for netCDF, JasPer)
    - patch compile script and ungrib Makefile for non-default install paths of WRF and JasPer
    - run configure script and figure how to select desired build option
    - patch configure.wps file afterwards to fix 'serial compiler' setting
    """
    # netCDF dependency check + setting env vars (NETCDF, NETCDFF)
    set_netcdf_env_vars(self.log)
    self.netcdf_mod_cmds = get_netcdf_module_set_cmds(self.log)

    # WRF dependency check
    wrf = get_software_root('WRF')
    if wrf:
        majver = get_software_version('WRF').split('.')[0]
        self.wrfdir = os.path.join(wrf, "WRFV%s" % majver)
    else:
        self.log.error("WRF module not loaded?")

    # patch compile script so that WRF is found
    self.compile_script = "compile"
    try:
        for line in fileinput.input(self.compile_script, inplace=1, backup='.orig.wrf'):
            line = re.sub(r"^(\s*set\s*WRF_DIR_PRE\s*=\s*)\${DEV_TOP}(.*)$", r"\1%s\2" % self.wrfdir, line)
            sys.stdout.write(line)
    # 'as' form instead of Python-2-only 'except IOError, err:' (valid on Python 2.6+ and 3)
    except IOError as err:
        self.log.error("Failed to patch %s script: %s" % (self.compile_script, err))
def extensions_step(self):
    """Custom extensions procedure for TensorRT."""
    super(EB_TensorRT, self).extensions_step()

    # e.g. '3.6' -> '36', as used in wheel filenames (cp36)
    pyver = ''.join(get_software_version('Python').split('.')[:2])
    whls = [
        os.path.join('graphsurgeon', 'graphsurgeon-*-py2.py3-none-any.whl'),
        os.path.join('uff', 'uff-*-py2.py3-none-any.whl'),
        os.path.join('python', 'tensorrt-%s-cp%s-*-linux_x86_64.whl' % (self.version, pyver)),
    ]
    for whl in whls:
        whl_paths = glob.glob(os.path.join(self.installdir, whl))
        if len(whl_paths) == 1:
            cmd = PIP_INSTALL_CMD % {
                'installopts': self.cfg['installopts'],
                'loc': whl_paths[0],
                'prefix': self.installdir,
            }
            # Use --no-deps to prevent pip from downloading & installing
            # any dependencies. They should be listed as extensions in
            # the easyconfig.
            # --ignore-installed is required to ensure *this* wheel is installed
            cmd += " --ignore-installed --no-deps"
            run_cmd(cmd, log_all=True, simple=True, log_ok=True)
        else:
            # format args in the right order: location first, then the glob matches
            # (original had them swapped, producing a misleading error message)
            raise EasyBuildError("Failed to isolate .whl in %s: %s", self.installdir, whl_paths)
def make_module_step(self, fake=False): """ Custom module step for SystemMPI: make 'EBROOT' and 'EBVERSION' reflect actual system MPI version and install path. """ # First let's verify that the toolchain and the compilers under MPI match if self.toolchain.name == DUMMY_TOOLCHAIN_NAME: # If someone is using dummy as the MPI toolchain lets assume that gcc is the compiler underneath MPI c_compiler_name = 'gcc' # Also need to fake the compiler version c_compiler_version = self.c_compiler_version self.log.info("Found dummy toolchain so assuming GCC as compiler underneath MPI and faking the version") else: c_compiler_name = self.toolchain.COMPILER_CC c_compiler_version = get_software_version(self.toolchain.COMPILER_MODULE_NAME[0]) if self.mpi_c_compiler != c_compiler_name or self.c_compiler_version != c_compiler_version: raise EasyBuildError("C compiler for toolchain (%s/%s) and underneath MPI (%s/%s) do not match!", c_compiler_name, c_compiler_version, self.mpi_c_compiler, self.c_compiler_version) # For module file generation: temporarily set version and installdir to system MPI values self.cfg['version'] = self.mpi_version self.installdir = self.mpi_prefix # Generate module res = super(SystemMPI, self).make_module_step(fake=fake) # Reset version and installdir to EasyBuild values self.installdir = self.orig_installdir self.cfg['version'] = self.orig_version return res
def configure_step(self):
    """Patch 'config.mk' file to use EB stuff"""
    # propagate toolchain compilers & flags into the MXNet build options
    for (var, env_var) in [('CC', 'CC'), ('CXX', 'CXX'), ('ADD_CFLAGS', 'CFLAGS'), ('ADD_LDFLAGS', 'LDFLAGS')]:
        self.cfg.update('buildopts', '%s="%s"' % (var, os.getenv(env_var)))

    # use [None] as default so a toolchain without a 'BLAS' entry doesn't
    # trigger a TypeError on the [0] subscript (None[0])
    toolchain_blas = self.toolchain.definition().get('BLAS', [None])[0]
    if toolchain_blas == 'imkl':
        blas = "mkl"
        imkl_version = get_software_version('imkl')
        if LooseVersion(imkl_version) >= LooseVersion('17'):
            self.cfg.update('buildopts', 'USE_MKL2017=1')
            self.cfg.update('buildopts', 'MKLML_ROOT="%s"' % os.getenv("MKLROOT"))
    elif toolchain_blas in ['ACML', 'ATLAS']:
        blas = "atlas"
    elif toolchain_blas == 'OpenBLAS':
        blas = "openblas"
    elif toolchain_blas is None:
        raise EasyBuildError("No BLAS library found in the toolchain")
    else:
        # fail explicitly rather than hitting a NameError on 'blas' below
        raise EasyBuildError("BLAS library '%s' is not supported yet", toolchain_blas)
    self.cfg.update('buildopts', 'USE_BLAS="%s"' % blas)

    if get_software_root('NNPACK'):
        self.cfg.update('buildopts', 'USE_NNPACK=1')

    super(EB_MXNet, self).configure_step()
def configure_step(self): """Custom configuration procedure for NEURON.""" # enable support for distributed simulations if desired if self.cfg['paranrn']: self.cfg.update('configopts', '--with-paranrn') # specify path to InterViews if it is available as a dependency interviews_root = get_software_root('InterViews') if interviews_root: self.cfg.update('configopts', "--with-iv=%s" % interviews_root) else: self.cfg.update('configopts', "--without-iv") # optionally enable support for Python as alternative interpreter python_root = get_software_root('Python') if python_root: self.with_python = True self.cfg.update('configopts', "--with-nrnpython=%s/bin/python" % python_root) self.pyver = '.'.join(get_software_version('Python').split('.')[0:2]) # determine host CPU type cmd = "./config.guess" (out, ec) = run_cmd(cmd, simple=False) self.hostcpu = out.split('\n')[0].split('-')[0] self.log.debug("Determined host CPU type as %s" % self.hostcpu) # complete configuration with configure_method of parent super(EB_NEURON, self).configure_step()
def configure_step(self):
    """Configure build of toy."""
    # make sure Python system dep is handled correctly when specified
    if self.cfg['allow_system_deps']:
        if get_software_root('Python') != 'Python' or get_software_version('Python') != platform.python_version():
            # raise an actual error (consistent with the generalized variant of this method);
            # self.log.error only logs and does not abort in recent EasyBuild versions
            raise EasyBuildError("Sanity check on allowed Python system dep failed.")
    os.rename('toy.source', 'toy.c')
def _get_software_version(self, name):
    """Try to get the software root for name"""
    # guard clause: fail fast when the version is not available in the environment
    found = get_software_version(name)
    if found is None:
        raise EasyBuildError("get_software_version software version for %s was not found in environment", name)
    self.log.debug("get_software_version software version %s for %s was found in environment", found, name)
    return found
def configure_step(self, name=None):
    """Configure build of toy."""
    # default to the easyblock's software name, so subclasses can reuse this step
    if name is None:
        name = self.name

    # make sure Python system dep is handled correctly when specified
    if self.cfg['allow_system_deps']:
        if get_software_root('Python') != 'Python' or get_software_version('Python') != platform.python_version():
            raise EasyBuildError("Sanity check on allowed Python system dep failed.")

    # rename the source file to a compilable .c file
    os.rename('%s.source' % name, '%s.c' % name)
def configure_step(self):
    """Collect altroot/altversion info."""
    # pick up altroot/altversion, if they are defined
    altroot_name = self.cfg['altroot']
    self.altroot = get_software_root(altroot_name) if altroot_name else None

    altversion_name = self.cfg['altversion']
    self.altversion = get_software_version(altversion_name) if altversion_name else None
def _set_fftw_variables(self):
    """Set FFT_* variables based on the FFTW interface libraries shipped with Intel MKL."""
    if not hasattr(self, "BLAS_LIB_DIR"):
        raise EasyBuildError("_set_fftw_variables: IntelFFT based on IntelMKL (no BLAS_LIB_DIR found)")

    imklver = get_software_version(self.FFT_MODULE_NAME[0])

    # library name suffixes depend on the PIC/i8 toolchain options
    picsuff = ""
    if self.options.get("pic", None):
        picsuff = "_pic"
    bitsuff = "_lp64"
    if self.options.get("i8", None):
        bitsuff = "_ilp64"
    # compiler suffix: Intel by default, else PGI or GNU depending on loaded compiler
    compsuff = "_intel"
    if get_software_root("icc") is None:
        if get_software_root("PGI"):
            compsuff = "_pgi"
        elif get_software_root("GCC"):
            compsuff = "_gnu"
        else:
            raise EasyBuildError(
                "Not using Intel compilers, PGI nor GCC, don't know compiler suffix for FFTW libraries."
            )

    fftw_libs = ["fftw3xc%s%s" % (compsuff, picsuff)]
    if self.options["usempi"]:
        # add cluster interface for recent imkl versions
        if LooseVersion(imklver) >= LooseVersion("11.0.2"):
            fftw_libs.append("fftw3x_cdft%s%s" % (bitsuff, picsuff))
        elif LooseVersion(imklver) >= LooseVersion("10.3"):
            fftw_libs.append("fftw3x_cdft%s" % picsuff)
        fftw_libs.append("mkl_cdft_core")  # add cluster dft
        fftw_libs.extend(self.variables["LIBBLACS"].flatten())  # add BLACS; use flatten because ListOfList

    self.log.debug("fftw_libs %s" % fftw_libs.__repr__())
    fftw_libs.extend(self.variables["LIBBLAS"].flatten())  # add BLAS libs (contains dft)
    self.log.debug("fftw_libs %s" % fftw_libs.__repr__())

    # FFTW interfaces live alongside the MKL BLAS libraries
    self.FFT_LIB_DIR = self.BLAS_LIB_DIR
    self.FFT_INCLUDE_DIR = self.BLAS_INCLUDE_DIR

    # building the FFTW interfaces is optional,
    # so make sure libraries are there before FFT_LIB is set
    imklroot = get_software_root(self.FFT_MODULE_NAME[0])
    fft_lib_dirs = [os.path.join(imklroot, d) for d in self.FFT_LIB_DIR]

    # filter out libraries from list of FFTW libraries to check for if they are not provided by Intel MKL
    check_fftw_libs = [lib for lib in fftw_libs if lib not in ["dl", "gfortran"]]

    fftw_lib_exists = lambda x: any([os.path.exists(os.path.join(d, "lib%s.a" % x)) for d in fft_lib_dirs])
    if all([fftw_lib_exists(lib) for lib in check_fftw_libs]):
        self.FFT_LIB = fftw_libs
    else:
        msg = "Not all FFTW interface libraries %s are found in %s" % (check_fftw_libs, fft_lib_dirs)
        msg += ", can't set $FFT_LIB."
        if self.dry_run:
            dry_run_warning(msg, silent=build_option("silent"))
        else:
            raise EasyBuildError(msg)
def make_module_extra(self):
    """Set extra environment variables in module file."""
    txt = super(EB_XCrySDen, self).make_module_extra()

    # point $TCL_LIBRARY/$TK_LIBRARY to the versioned lib subdir (e.g. lib/tcl8.6)
    for lib in ['Tcl', 'Tk']:
        ver = '.'.join(get_software_version(lib).split('.')[0:2])
        libpath = os.path.join(get_software_root(lib), 'lib', "%s%s" % (lib.lower(), ver))
        txt += self.moduleGenerator.set_environment('%s_LIBRARY' % lib.upper(), libpath)

    return txt
def configure_step(self):
    """
    Set the CMake options for SuperLU
    """
    self.cfg['separate_build_dir'] = True

    # shared vs static build determines the library extension checked later
    if self.cfg['build_shared_libs']:
        self.cfg.update('configopts', '-DBUILD_SHARED_LIBS=ON')
        self.lib_ext = get_shared_lib_ext()
    else:
        self.cfg.update('configopts', '-DBUILD_SHARED_LIBS=OFF')
        self.lib_ext = 'a'

    # Add -fPIC flag if necessary
    pic_flag = ('OFF', 'ON')[self.toolchain.options['pic']]
    self.cfg.update('configopts', '-DCMAKE_POSITION_INDEPENDENT_CODE=%s' % pic_flag)

    # Make sure not to build the slow BLAS library included in the package
    self.cfg.update('configopts', '-Denable_blaslib=OFF')

    # Set the BLAS library to use
    # For this, use the BLA_VENDOR option from the FindBLAS module of CMake
    # Check for all possible values at https://cmake.org/cmake/help/latest/module/FindBLAS.html
    toolchain_blas = self.toolchain.definition().get('BLAS', None)[0]
    if toolchain_blas == 'imkl':
        imkl_version = get_software_version('imkl')
        if LooseVersion(imkl_version) >= LooseVersion('10'):
            # 'Intel10_64lp' -> For Intel mkl v10 64 bit,lp thread model, lp64 model
            # It should work for Intel MKL 10 and above, as long as the library names stay the same
            # SuperLU requires thread, 'Intel10_64lp_seq' will not work!
            self.cfg.update('configopts', '-DBLA_VENDOR="Intel10_64lp"')
        else:
            # 'Intel' -> For older versions of mkl 32 and 64 bit
            self.cfg.update('configopts', '-DBLA_VENDOR="Intel"')
    elif toolchain_blas in ['ACML', 'ATLAS']:
        self.cfg.update('configopts', '-DBLA_VENDOR="%s"' % toolchain_blas)
    elif toolchain_blas == 'OpenBLAS':
        # Unfortunately, OpenBLAS is not recognized by FindBLAS from CMake,
        # we have to specify the OpenBLAS library manually
        openblas_lib = os.path.join(
            get_software_root('OpenBLAS'), get_software_libdir('OpenBLAS'), "libopenblas.a"
        )
        self.cfg.update('configopts', '-DBLAS_LIBRARIES="%s;-pthread"' % openblas_lib)
    elif toolchain_blas is None:  # 'is None' rather than '== None' (PEP 8)
        # This toolchain has no BLAS library
        raise EasyBuildError("No BLAS library found in the toolchain")
    else:
        # This BLAS library is not supported yet
        raise EasyBuildError("BLAS library '%s' is not supported yet", toolchain_blas)

    super(EB_SuperLU, self).configure_step()
def configure_step(self):
    """Custom configure step for NAMD, we build charm++ first (if required)."""

    # complete Charm ++ and NAMD architecture string with compiler family
    comp_fam = self.toolchain.comp_family()
    if self.toolchain.options["usempi"]:
        charm_arch_comp = "mpicxx"
    else:
        charm_arch_comps = {toolchain.GCC: "gcc", toolchain.INTELCOMP: "icc"}
        charm_arch_comp = charm_arch_comps.get(comp_fam, None)
    namd_comps = {toolchain.GCC: "g++", toolchain.INTELCOMP: "icc"}
    namd_comp = namd_comps.get(comp_fam, None)
    if charm_arch_comp is None or namd_comp is None:
        raise EasyBuildError("Unknown compiler family, can't complete Charm++/NAMD target architecture.")
    self.cfg.update("charm_arch", charm_arch_comp)
    self.log.info("Updated 'charm_arch': %s" % self.cfg["charm_arch"])
    self.namd_arch = "%s-%s" % (self.cfg["namd_basearch"], namd_comp)
    self.log.info("Completed NAMD target architecture: %s" % self.namd_arch)

    # exactly one bundled Charm++ tarball is expected alongside the NAMD sources
    charm_tarballs = glob.glob("charm-*.tar")
    if len(charm_tarballs) != 1:
        raise EasyBuildError("Expected to find exactly one tarball for Charm++, found: %s", charm_tarballs)
    extract_file(charm_tarballs[0], os.getcwd())

    # build Charm++ before configuring NAMD itself
    tup = (self.cfg["charm_arch"], self.cfg["charm_opts"], self.cfg["parallel"], os.environ["CXXFLAGS"])
    cmd = "./build charm++ %s %s -j%s %s -DMPICH_IGNORE_CXX_SEEK" % tup
    charm_subdir = ".".join(os.path.basename(charm_tarballs[0]).split(".")[:-1])
    self.log.debug("Building Charm++ using cmd '%s' in '%s'" % (cmd, charm_subdir))
    run_cmd(cmd, path=charm_subdir)

    # compiler (options)
    self.cfg.update("namd_cfg_opts", '--cc "%s" --cc-opts "%s"' % (os.environ["CC"], os.environ["CFLAGS"]))
    self.cfg.update("namd_cfg_opts", '--cxx "%s" --cxx-opts "%s"' % (os.environ["CXX"], os.environ["CXXFLAGS"]))

    # NAMD dependencies: CUDA, FFTW
    cuda = get_software_root("CUDA")
    if cuda:
        self.cfg.update("namd_cfg_opts", "--with-cuda --cuda-prefix %s" % cuda)

    fftw = get_software_root("FFTW")
    if fftw:
        if LooseVersion(get_software_version("FFTW")) >= LooseVersion("3.0"):
            # FFTW v3.x requires NAMD v2.9 or newer
            if LooseVersion(self.version) >= LooseVersion("2.9"):
                self.cfg.update("namd_cfg_opts", "--with-fftw3")
            else:
                raise EasyBuildError("Using FFTW v3.x only supported in NAMD v2.9 and up.")
        else:
            self.cfg.update("namd_cfg_opts", "--with-fftw")
        self.cfg.update("namd_cfg_opts", "--fftw-prefix %s" % fftw)

    # charm arch may contain spaces; NAMD's config script expects '-'-separated
    namd_charm_arch = "--charm-arch %s" % "-".join(self.cfg["charm_arch"].strip().split(" "))
    cmd = "./config %s %s %s " % (self.namd_arch, namd_charm_arch, self.cfg["namd_cfg_opts"])
    run_cmd(cmd)
def _set_optimal_architecture(self, default_optarch=None):
    """
    GCC-specific adjustments for optimal architecture flags.

    :param default_optarch: default value to use for optarch, rather than using default value based on architecture
                            (--optarch and --optarch=GENERIC still override this value)
    """
    if default_optarch is None and self.arch == systemtools.AARCH64:
        # try GCCcore first, fall back to GCC
        gcc_version = None
        for gcc_name in ('GCCcore', 'GCC'):
            gcc_version = get_software_version(gcc_name)
            if gcc_version is not None:
                break
        if gcc_version is None:
            raise EasyBuildError("Failed to determine software version for GCC")

        if LooseVersion(gcc_version) < LooseVersion('6'):
            # on AArch64, -mcpu=native is not supported prior to GCC 6,
            # so try to guess a proper default optarch if none was specified
            default_optarch = self._guess_aarch64_default_optarch()

    super(Gcc, self)._set_optimal_architecture(default_optarch=default_optarch)
def test_prepare_deps_external(self):
    """Test preparing for a toolchain when dependencies and external modules are involved."""
    deps = [
        {
            'name': 'OpenMPI',
            'version': '1.6.4',
            'full_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'short_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'external_module': False,
            'external_module_metadata': {},
        },
        # no metadata available
        {
            'name': None,
            'version': None,
            'full_mod_name': 'toy/0.0',
            'short_mod_name': 'toy/0.0',
            'external_module': True,
            'external_module_metadata': {},
        }
    ]
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    tc.prepare()

    mods = ['GCC/4.6.4', 'hwloc/1.6.2-GCC-4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4', 'toy/0.0']
    # use assertEqual: assertTrue(x, y) treats y as the failure *message*, making the check vacuous
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertTrue(os.environ['EBROOTTOY'].endswith('software/toy/0.0'))
    self.assertEqual(os.environ['EBVERSIONTOY'], '0.0')
    self.assertFalse('EBROOTFOOBAR' in os.environ)

    # with metadata
    deps[1] = {
        'full_mod_name': 'toy/0.0',
        'short_mod_name': 'toy/0.0',
        'external_module': True,
        'external_module_metadata': {
            'name': ['toy', 'foobar'],
            'version': ['1.2.3', '4.5'],
            'prefix': 'FOOBAR_PREFIX',
        }
    }
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    os.environ['FOOBAR_PREFIX'] = '/foo/bar'
    tc.prepare()

    mods = ['GCC/4.6.4', 'hwloc/1.6.2-GCC-4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4', 'toy/0.0']
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertEqual(os.environ['EBROOTTOY'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONTOY'], '1.2.3')
    self.assertEqual(os.environ['EBROOTFOOBAR'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONFOOBAR'], '4.5')

    self.assertEqual(modules.get_software_root('foobar'), '/foo/bar')
    self.assertEqual(modules.get_software_version('toy'), '1.2.3')
def _get_software_version(self, name):
    """Try to get the software root for name"""
    version = get_software_version(name)
    if version is None:
        self.log.raiseException("get_software_version software version for %s was not found in environment" % (name))
    else:
        # lazy %-style logging args: formatting only happens if debug level is enabled
        self.log.debug("get_software_version software version %s for %s was found in environment", version, name)
    return version
def det_pylibdir(): """Determine Python library directory.""" # note: we can't rely on distutils.sysconfig.get_python_lib(), # since setuptools and distribute hardcode 'lib/python2.X/site-packages' pyver = get_software_version('Python') if not pyver: log = fancylogger.getLogger('det_pylibdir', fname=False) log.error("Python module not loaded.") else: short_pyver = '.'.join(pyver.split('.')[:2]) return "lib/python%s/site-packages" % short_pyver
def configure_step(self): """Just check whether dependencies (Meep, Python) are available.""" # complete Python packages lib dir pythonver = ".".join(get_software_version('Python').split(".")[0:2]) self.pylibdir = self.pylibdir % pythonver # make sure that required dependencies are loaded deps = ["Meep", "Python"] for dep in deps: if not get_software_root(dep): self.log.error("Module for %s not loaded." % dep)
def _set_fftw_variables(self):
    """Set FFT_* variables based on the FFTW interface libraries shipped with Intel MKL."""
    if not hasattr(self, 'BLAS_LIB_DIR'):
        raise EasyBuildError("_set_fftw_variables: IntelFFT based on IntelMKL (no BLAS_LIB_DIR found)")

    imklver = get_software_version(self.FFT_MODULE_NAME[0])

    # library name suffixes depend on the PIC/i8 toolchain options
    picsuff = ''
    if self.options.get('pic', None):
        picsuff = '_pic'
    bitsuff = '_lp64'
    if self.options.get('i8', None):
        bitsuff = '_ilp64'
    # compiler suffix: Intel by default, GNU when GCC is used instead of icc
    compsuff = '_intel'
    if get_software_root('icc') is None:
        if get_software_root('GCC'):
            compsuff = '_gnu'
        else:
            raise EasyBuildError("Not using Intel compilers or GCC, don't know compiler suffix for FFTW libraries.")

    fftw_libs = ["fftw3xc%s%s" % (compsuff, picsuff)]
    if self.options['usempi']:
        # add cluster interface for recent imkl versions
        if LooseVersion(imklver) >= LooseVersion("11.0.2"):
            fftw_libs.append("fftw3x_cdft%s%s" % (bitsuff, picsuff))
        elif LooseVersion(imklver) >= LooseVersion("10.3"):
            fftw_libs.append("fftw3x_cdft%s" % picsuff)
        fftw_libs.append("mkl_cdft_core")  # add cluster dft
        fftw_libs.extend(self.variables['LIBBLACS'].flatten())  # add BLACS; use flatten because ListOfList

    self.log.debug('fftw_libs %s' % fftw_libs.__repr__())
    fftw_libs.extend(self.variables['LIBBLAS'].flatten())  # add BLAS libs (contains dft)
    self.log.debug('fftw_libs %s' % fftw_libs.__repr__())

    # FFTW interfaces live alongside the MKL BLAS libraries
    self.FFT_LIB_DIR = self.BLAS_LIB_DIR
    self.FFT_INCLUDE_DIR = self.BLAS_INCLUDE_DIR

    # building the FFTW interfaces is optional,
    # so make sure libraries are there before FFT_LIB is set
    imklroot = get_software_root(self.FFT_MODULE_NAME[0])
    fft_lib_dirs = [os.path.join(imklroot, d) for d in self.FFT_LIB_DIR]

    # filter out gfortran from list of FFTW libraries to check for, since it's not provided by imkl
    check_fftw_libs = [lib for lib in fftw_libs if lib != 'gfortran']

    fftw_lib_exists = lambda x: any([os.path.exists(os.path.join(d, "lib%s.a" % x)) for d in fft_lib_dirs])
    if all([fftw_lib_exists(lib) for lib in check_fftw_libs]):
        self.FFT_LIB = fftw_libs
    else:
        msg = "Not all FFTW interface libraries %s are found in %s" % (check_fftw_libs, fft_lib_dirs)
        msg += ", can't set $FFT_LIB."
        if build_option('extended_dry_run'):
            dry_run_warning(msg, silent=build_option('silent'))
        else:
            raise EasyBuildError(msg)
def prepareFFTW(self): """ Prepare for FFTW library """ suffix = '' if get_software_version('FFTW').startswith('3.'): suffix = '3' self.vars['LIBFFT'] = " -lfftw%s " % suffix if self.opts['usempi']: self.vars['LIBFFT'] += " -lfftw%s_mpi " % suffix self._addDependencyVariables(['FFTW'])
def configure_step(self): """Configure Python package build.""" self.python = get_software_root("Python") pyver = ".".join(get_software_version("Python").split(".")[0:2]) self.pylibdir = self.pylibdir % pyver self.log.debug("Python library dir: %s" % self.pylibdir) python_version = get_software_version("Python") if not python_version: self.log.error("Python module not loaded.") if self.sitecfg is not None: # used by some extensions, like numpy, to find certain libs finaltxt = self.sitecfg if self.sitecfglibdir: repl = self.sitecfglibdir finaltxt = finaltxt.replace("SITECFGLIBDIR", repl) if self.sitecfgincdir: repl = self.sitecfgincdir finaltxt = finaltxt.replace("SITECFGINCDIR", repl) self.log.debug("Using %s: %s" % (self.sitecfgfn, finaltxt)) try: if os.path.exists(self.sitecfgfn): txt = open(self.sitecfgfn).read() self.log.debug("Found %s: %s" % (self.sitecfgfn, txt)) config = open(self.sitecfgfn, "w") config.write(finaltxt) config.close() except IOError: self.log.exception("Creating %s failed" % self.sitecfgfn) # creates log entries for python being used, for debugging run_cmd("python -V") run_cmd("which python") run_cmd("python -c 'import sys; print(sys.executable)'")
def configure_step(self):
    """Custom configuration for ROOT, add configure options.

    For ROOT >= 6.10 the build is configured via CMake (the old ./configure
    script is deprecated/broken there); for older versions the classic
    ./configure script is run with the specified architecture.
    """
    # using ./configure is deprecated/broken in recent versions, need to use CMake instead
    if LooseVersion(self.version.lstrip('v')) >= LooseVersion('6.10'):
        # 'arch' only applies to the old configure script, so it must not be set here
        if self.cfg['arch']:
            raise EasyBuildError("Specified value '%s' for 'arch' is not used, should not be set", self.cfg['arch'])

        cfitsio_root = get_software_root('CFITSIO')
        if cfitsio_root:
            self.cfg.update('configopts', '-DCFITSIO=%s' % cfitsio_root)

        fftw_root = get_software_root('FFTW')
        if fftw_root:
            self.cfg.update('configopts', '-Dbuiltin_fftw3=OFF -DFFTW_DIR=%s' % fftw_root)

        gsl_root = get_software_root('GSL')
        if gsl_root:
            self.cfg.update('configopts', '-DGSL_DIR=%s' % gsl_root)

        mesa_root = get_software_root('Mesa')
        if mesa_root:
            # bug fix: was '-DDOPENGL_INCLUDE_DIR' (doubled 'D'), which defined the
            # unknown CMake variable 'DOPENGL_INCLUDE_DIR' and was silently ignored;
            # FindOpenGL expects OPENGL_INCLUDE_DIR
            self.cfg.update('configopts', '-DOPENGL_INCLUDE_DIR=%s' % os.path.join(mesa_root, 'include'))
            self.cfg.update('configopts', '-DOPENGL_gl_LIBRARY=%s' % os.path.join(mesa_root, 'lib', 'libGL.so'))

        python_root = get_software_root('Python')
        if python_root:
            # major.minor version, e.g. '2.7', used in include dir and library names
            pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
            self.cfg.update('configopts', '-DPYTHON_EXECUTABLE=%s' % os.path.join(python_root, 'bin', 'python'))
            python_inc_dir = os.path.join(python_root, 'include', 'python%s' % pyshortver)
            self.cfg.update('configopts', '-DPYTHON_INCLUDE_DIR=%s' % python_inc_dir)
            python_lib = os.path.join(python_root, 'lib', 'libpython%s.so' % pyshortver)
            self.cfg.update('configopts', '-DPYTHON_LIBRARY=%s' % python_lib)

        if get_software_root('X11'):
            self.cfg.update('configopts', '-Dx11=ON')

        self.cfg['separate_build_dir'] = True
        CMakeMake.configure_step(self)
    else:
        # classic ./configure path requires an explicit architecture
        if self.cfg['arch'] is None:
            raise EasyBuildError("No architecture specified to pass to configure script")

        self.cfg.update('configopts', "--etcdir=%s/etc/root " % self.installdir)

        cmd = "%s ./configure %s --prefix=%s %s" % (self.cfg['preconfigopts'], self.cfg['arch'],
                                                    self.installdir, self.cfg['configopts'])

        run_cmd(cmd, log_all=True, log_ok=True, simple=True)
def configure_intel_based(self): """Configure for Intel based toolchains""" options = self.configure_common() extrainc = '' if self.modincpath: extrainc = '-I%s' % self.modincpath options.update({ ## -Vaxlib : older options 'FREE': '-fpp -free', #SAFE = -assume protect_parens -fp-model precise -ftz # problems 'SAFE': '-assume protect_parens -no-unroll-aggressive', 'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc, 'LDFLAGS': '$(INCFLAGS) -i-static', 'OBJECTS_ARCHITECTURE': 'machine_intel.o', }) options['DFLAGS'] += ' -D__INTEL' options['FCFLAGSOPT'] += ' $(INCFLAGS) -xHOST -heap-arrays 64 -funroll-loops' options['FCFLAGSOPT2'] += ' $(INCFLAGS) -xHOST -heap-arrays 64' # see http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/ self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" if LooseVersion(get_software_version('ifort')) >= LooseVersion("2011.8"): self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n" self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" elif LooseVersion(get_software_version('ifort')) >= LooseVersion("2011"): self.log.error("CP2K won't build correctly with the Intel v12 compilers before version 2011.8.") return options
def build_step(self):
    """Build libsmm
    Possible iterations over precision (single/double) and type (real/complex) - also type of transpose matrix
    - all set in the config file

    Make the config.in file (is source afterwards in the build)
    """
    fn = 'config.in'
    # template for the config.in file sourced by libsmm's build scripts;
    # %(...)s placeholders are filled in from cfgdict below
    cfg_tpl = """# This config file was generated by EasyBuild
# the build script can generate optimized routines packed in a library for
# 1) 'nn' => C=C+MATMUL(A,B)
# 2) 'tn' => C=C+MATMUL(TRANSPOSE(A),B)
# 3) 'nt' => C=C+MATMUL(A,TRANSPOSE(B))
# 4) 'tt' => C=C+MATMUL(TRANPOSE(A),TRANPOSE(B))
#
# select a tranpose_flavor from the list 1 2 3 4
#
transpose_flavor=%(transposeflavour)s

# 1) d => double precision real
# 2) s => single precision real
# 3) z => double precision complex
# 4) c => single precision complex
#
# select a data_type from the list 1 2 3 4
#
data_type=%(datatype)s

# target compiler... this are the options used for building the library.
# They should be aggessive enough to e.g. perform vectorization for the specific CPU (e.g. -ftree-vectorize -march=native),
# and allow some flexibility in reordering floating point expressions (-ffast-math).
# Higher level optimisation (in particular loop nest optimization) should not be used.
#
target_compile="%(targetcompile)s"

# target dgemm link options... these are the options needed to link blas (e.g. -lblas)
# blas is used as a fall back option for sizes not included in the library or in those cases where it is faster
# the same blas library should thus also be used when libsmm is linked.
#
OMP_NUM_THREADS=1
blas_linking="%(LIBBLAS)s"

# matrix dimensions for which optimized routines will be generated.
# since all combinations of M,N,K are being generated the size of the library becomes very large
# if too many sizes are being optimized for. Numbers have to be ascending.
#
dims_small="%(dims)s"

# tiny dimensions are used as primitves and generated in an 'exhaustive' search.
# They should be a sequence from 1 to N,
# where N is a number that is large enough to have good cache performance
# (e.g. for modern SSE cpus 8 to 12)
# Too large (>12?) is not beneficial, but increases the time needed to build the library
# Too small (<8) will lead to a slow library, but the build might proceed quickly
# The minimum number for a successful build is 4
#
dims_tiny="%(tiny_dims)s"

# host compiler... this is used only to compile a few tools needed to build the library.
# The library itself is not compiled this way.
# This compiler needs to be able to deal with some Fortran2003 constructs.
#
host_compile="%(hostcompile)s "

# number of processes to use in parallel for compiling / building and benchmarking the library.
# Should *not* be more than the physical (available) number of cores of the machine
#
tasks=%(tasks)s
"""

    # only GCC is supported for now
    if self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
        hostcompile = os.getenv('F90')

        # optimizations
        opts = "-O2 -funroll-loops -ffast-math -ftree-vectorize -march=native -fno-inline-functions"

        # Depending on the get_version, we need extra options
        extra = ''
        gccVersion = LooseVersion(get_software_version('GCC'))
        if gccVersion >= LooseVersion('4.6'):
            # link-time optimization is only available as of GCC 4.6
            extra = "-flto"

        targetcompile = "%s %s %s" % (hostcompile, opts, extra)
    else:
        raise EasyBuildError("No supported compiler found (tried GCC)")

    # LIBBLAS must be set by the toolchain; it is substituted into the config template
    if not os.getenv('LIBBLAS'):
        raise EasyBuildError("No BLAS library specifications found (LIBBLAS not set)!")

    # values to substitute into the config template; 'datatype' is filled in per iteration below
    cfgdict = {
        'datatype': None,
        'transposeflavour': self.cfg['transpose_flavour'],
        'targetcompile': targetcompile,
        'hostcompile': hostcompile,
        'dims': ' '.join([str(d) for d in self.cfg['dims']]),
        'tiny_dims': ' '.join([str(d) for d in range(1, self.cfg['max_tiny_dim'] + 1)]),
        'tasks': self.cfg['parallel'],
        'LIBBLAS': "%s %s" % (os.getenv('LDFLAGS'), os.getenv('LIBBLAS'))
    }

    # configure for various iterations
    datatypes = [(1, 'double precision real'), (3, 'double precision complex')]
    for (dt, descr) in datatypes:
        cfgdict['datatype'] = dt
        try:
            # (re)write config.in for this datatype, then run a full clean build
            txt = cfg_tpl % cfgdict
            f = open(fn, 'w')
            f.write(txt)
            f.close()
            self.log.debug("config file %s for datatype %s ('%s'): %s" % (fn, dt, descr, txt))
        except IOError, err:
            raise EasyBuildError("Failed to write %s: %s", fn, err)

        self.log.info("Building for datatype %s ('%s')..." % (dt, descr))
        run_cmd("./do_clean")
        run_cmd("./do_all")
def configure_step(self):
    """Configure WIEN2k build by patching siteconfig_lapw script and running it.

    The siteconfig_lapw script is first patched in place with toolchain-derived
    compiler/library settings, then driven interactively via run_cmd_qa; the
    resulting parallel_options file is patched afterwards.
    """
    self.cfgscript = "siteconfig_lapw"

    # patch config file first

    # toolchain-dependent values: pick the siteconfig compiler menu answer
    comp_answer = None
    if self.toolchain.comp_family() == toolchain.INTELCOMP:  #@UndefinedVariable
        if LooseVersion(get_software_version("icc")) >= LooseVersion("2011"):
            comp_answer = 'I'  # Linux (Intel ifort 12.0 compiler + mkl )
        else:
            comp_answer = "K1"  # Linux (Intel ifort 11.1 compiler + mkl )
    elif self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
        comp_answer = 'V'  # Linux (gfortran compiler + gotolib)
    else:
        self.log.error("Failed to determine toolchain-dependent answers.")

    # libraries
    rlibs = "%s %s" % (os.getenv('LIBLAPACK_MT'), self.toolchain.get_flag('openmp'))
    rplibs = [os.getenv('LIBSCALAPACK_MT'), os.getenv('LIBLAPACK_MT')]
    fftwver = get_software_version('FFTW')
    if fftwver:
        # FFTW 3.x libraries carry a '3' suffix (libfftw3); prepend MPI + serial libs
        suff = ''
        if LooseVersion(fftwver) >= LooseVersion("3"):
            suff = '3'
        rplibs.insert(0, "-lfftw%(suff)s_mpi -lfftw%(suff)s" % {'suff': suff})
    else:
        rplibs.append(os.getenv('LIBFFT'))

    rplibs = ' '.join(rplibs)

    # key/value settings to substitute into siteconfig_lapw's '<tag>:<key>:<value>' lines
    d = {
        'FC': '%s %s' % (os.getenv('F90'), os.getenv('FFLAGS')),
        'MPF': "%s %s" % (os.getenv('MPIF90'), os.getenv('FFLAGS')),
        'CC': os.getenv('CC'),
        'LDFLAGS': '$(FOPT) %s ' % os.getenv('LDFLAGS'),
        'R_LIBS': rlibs,  # libraries for 'real' (not 'complex') binary
        'RP_LIBS': rplibs,  # libraries for 'real' parallel binary
        'MPIRUN': '',
    }

    # in-place edit: fileinput redirects stdout into the file, so sys.stdout.write keeps lines
    for line in fileinput.input(self.cfgscript, inplace=1, backup='.orig'):
        # set config parameters
        for (k, v) in d.items():
            regexp = re.compile('^([a-z0-9]+):%s:.*' % k)
            res = regexp.search(line)
            if res:
                # we need to exclude the lines with 'current', otherwise we break the script
                if not res.group(1) == "current":
                    line = regexp.sub('\\1:%s:%s' % (k, v), line)
        # avoid exit code > 0 at end of configuration
        line = re.sub('(\s+)exit 1', '\\1exit 0', line)
        sys.stdout.write(line)

    # set correct compilers
    env.setvar('bin', os.getcwd())

    # write one file per compiler role, each containing just the compiler command
    dc = {
        'COMPILERC': os.getenv('CC'),
        'COMPILER': os.getenv('F90'),
        'COMPILERP': os.getenv('MPIF90'),
    }

    for (k, v) in dc.items():
        f = open(k, "w")
        f.write(v)
        f.close()

    # configure with patched configure script
    self.log.debug('%s part I (configure)' % self.cfgscript)

    cmd = "./%s" % self.cfgscript
    # question/answer pairs for the interactive siteconfig script;
    # these strings must match the script's prompts exactly
    qanda = {
        'Press RETURN to continue': '',
        'compiler) Selection:': comp_answer,
        'Your compiler:': '',
        'Hit Enter to continue': '',
        'Shared Memory Architecture? (y/n):': 'n',
        'Remote shell (default is ssh) =': '',
        'and you need to know details about your installed mpi ..) (y/n)': 'y',
        'Recommended setting for parallel f90 compiler: mpiifort ' \
        'Current selection: Your compiler:': os.getenv('MPIF90'),
        'Q to quit Selection:': 'Q',
        'A Compile all programs (suggested) Q Quit Selection:': 'Q',
        ' Please enter the full path of the perl program: ': '',
        'continue or stop (c/s)': 'c',
        '(like taskset -c). Enter N / your_specific_command:': 'N',
        'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0',
        'Do you have MPI and Scalapack installed and intend to run ' \
        'finegrained parallel? (This is usefull only for BIG cases ' \
        '(50 atoms and more / unit cell) and you need to know details ' \
        'about your installed mpi and fftw ) (y/n)': 'y',
    }
    # output patterns that should not be treated as questions awaiting an answer
    no_qa = [
        'You have the following mkl libraries in %s :' % os.getenv('MKLROOT'),
        "%s[ \t]*.*" % os.getenv('MPIF90'),
        "%s[ \t]*.*" % os.getenv('F90'),
        "%s[ \t]*.*" % os.getenv('CC'),
        ".*SRC_.*",
        "Please enter the full path of the perl program:",
    ]
    # regex-style question/answer pairs
    std_qa = {
        r'S\s+Save and Quit[\s\n]+To change an item select option.[\s\n]+Selection:': 'S',
    }

    run_cmd_qa(cmd, qanda, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    # post-configure patches
    fn = os.path.join(self.cfg['start_dir'], 'parallel_options')
    remote = self.cfg['remote']
    try:
        # override WIEN_MPIRUN setting if a custom mpirun command was specified
        for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
            if self.cfg['wien_mpirun']:
                line = re.sub("(setenv WIEN_MPIRUN\s*).*", r'\1 "%s"' % self.cfg['wien_mpirun'], line)
            sys.stdout.write(line)
        if remote:
            f = open(fn, "a")
            if remote == 'pbsssh':
                extra = "set remote = pbsssh\n"
                extra += "setenv PBSSSHENV 'LD_LIBRARY_PATH PATH'\n"
            else:
                # NOTE: if log.error does not raise here, 'extra' is undefined below — verify
                self.log.error("Don't know how to patch %s for remote %s" % (fn, remote))
            f.write(extra)
            f.close()
        self.log.debug("Patched file %s: %s" % (fn, open(fn, 'r').read()))
    except IOError, err:
        self.log.error("Failed to patch %s: %s" % (fn, err))
class EB_XCrySDen(ConfigureMake):
    """Support for building/installing XCrySDen."""

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for XCrySDen"""
        super(EB_XCrySDen, self).__init__(*args, **kwargs)
        # Tcl/Tk root dirs and major.minor versions, filled in by configure_step
        self.tclroot = self.tclver = self.tkroot = self.tkver = 'UNKNOWN'

    def configure_step(self):
        """
        Check required dependencies, configure XCrySDen build by patching Make.sys file
        and set make target and installation prefix.
        """
        # check dependencies
        deps = ["Mesa", "Tcl", "Tk"]
        for dep in deps:
            if not get_software_root(dep):
                raise EasyBuildError("Module for dependency %s not loaded.", dep)

        # copy template Make.sys to apply_patch
        makesys_tpl_file = os.path.join("system", "Make.sys-shared")
        makesys_file = "Make.sys"
        try:
            shutil.copy2(makesys_tpl_file, makesys_file)
        except OSError, err:
            raise EasyBuildError("Failed to copy %s: %s", makesys_tpl_file, err)

        # major.minor versions are used in the Tcl/Tk library names (e.g. -ltcl8.6)
        self.tclroot = get_software_root("Tcl")
        self.tclver = '.'.join(get_software_version("Tcl").split('.')[0:2])
        self.tkroot = get_software_root("Tk")
        self.tkver = '.'.join(get_software_version("Tk").split('.')[0:2])

        # patch Make.sys: key/value pairs to set; COMPILE_* set to 'no' so bundled
        # copies of Tcl/Tk/Mesa/FFTW/Meschach are not built
        settings = {
            'CFLAGS': os.getenv('CFLAGS'),
            'CC': os.getenv('CC'),
            'FFLAGS': os.getenv('F90FLAGS'),
            'FC': os.getenv('F90'),
            'TCL_LIB': "-L%s/lib -ltcl%s" % (self.tclroot, self.tclver),
            'TCL_INCDIR': "-I%s/include" % self.tclroot,
            'TK_LIB': "-L%s/lib -ltk%s" % (self.tkroot, self.tkver),
            'TK_INCDIR': "-I%s/include" % self.tkroot,
            'GLU_LIB': "-L%s/lib -lGLU" % get_software_root("Mesa"),
            'GL_LIB': "-L%s/lib -lGL" % get_software_root("Mesa"),
            'GL_INCDIR': "-I%s/include" % get_software_root("Mesa"),
            'FFTW3_LIB': "-L%s %s -L%s %s" % (os.getenv('FFTW_LIB_DIR'), os.getenv('LIBFFT'),
                                              os.getenv('LAPACK_LIB_DIR'), os.getenv('LIBLAPACK_MT')),
            'FFTW3_INCDIR': "-I%s" % os.getenv('FFTW_INC_DIR'),
            'COMPILE_TCLTK': 'no',
            'COMPILE_MESA': 'no',
            'COMPILE_FFTW': 'no',
            'COMPILE_MESCHACH': 'no'
        }

        # in-place edit: replace 'KEY = ...' lines for keys found in the file
        for line in fileinput.input(makesys_file, inplace=1, backup='.orig'):
            # set config parameters
            for (k, v) in settings.items():
                regexp = re.compile('^%s(\s+=).*'% k)
                if regexp.search(line):
                    line = regexp.sub('%s\\1 %s' % (k, v), line)
                    # remove replaced key/value pairs
                    settings.pop(k)
            sys.stdout.write(line)

        f = open(makesys_file, "a")
        # append remaining key/value pairs
        for (k, v) in settings.items():
            f.write("%s = %s\n" % (k, v))
        f.close()

        self.log.debug("Patched Make.sys: %s" % open(makesys_file, "r").read())

        # set make target to 'xcrysden', such that dependencies are not downloaded/built
        self.cfg.update('buildopts', 'xcrysden')

        # set installation prefix
        self.cfg.update('preinstallopts', 'prefix=%s' % self.installdir)
class EB_NCL(EasyBlock):
    """Support for building/installing NCL."""

    def configure_step(self):
        """Configure build:
        - create Makefile.ini using make and run ymake script to create config file
        - patch config file with correct settings, and add missing config entries
        - create config/Site.local file to avoid interactive install
        - generate Makefile using config/ymkmf sciprt
        -
        """
        try:
            os.chdir('config')
        except OSError, err:
            self.log.error("Failed to change to the 'config' dir: %s" % err)

        # generate the ymake tool and run it to create the system config file
        cmd = "make -f Makefile.ini"
        run_cmd(cmd, log_all=True, simple=True)

        cmd = "./ymake -config $PWD"
        run_cmd(cmd, log_all=True, simple=True)

        # figure out name of config file from the SYSTEM_INCLUDE line in the generated Makefile
        cfg_regexp = re.compile('^\s*SYSTEM_INCLUDE\s*=\s*"(.*)"\s*$', re.M)
        f = open("Makefile", "r")
        txt = f.read()
        f.close()
        cfg_filename = cfg_regexp.search(txt).group(1)

        # adjust config file as needed
        # Fortran-to-C runtime libs; location of libifcore/libifport moved with ifort 2011.4
        ctof_libs = ''
        ifort = get_software_root('ifort')
        if ifort:
            if LooseVersion(get_software_version('ifort')) < LooseVersion('2011.4'):
                ctof_libs = '-lm -L%s/lib/intel64 -lifcore -lifport' % ifort
            else:
                ctof_libs = '-lm -L%s/compiler/lib/intel64 -lifcore -lifport' % ifort
        elif get_software_root('GCC'):
            ctof_libs = '-lgfortran -lm'
        # '#define <key> <value>' entries to set in the config file
        macrodict = {
            'CCompiler': os.getenv('CC'),
            'FCompiler': os.getenv('F90'),
            'CcOptions': '-ansi %s' % os.getenv('CFLAGS'),
            'FcOptions': os.getenv('FFLAGS'),
            'COptimizeFlag': os.getenv('CFLAGS'),
            'FOptimizeFlag': os.getenv('FFLAGS'),
            'ExtraSysLibraries': os.getenv('LDFLAGS'),
            'CtoFLibraries': ctof_libs
        }

        # replace config entries that are already there
        for line in fileinput.input(cfg_filename, inplace=1, backup='%s.orig' % cfg_filename):
            for (key, val) in macrodict.items():
                regexp = re.compile("(#define %s\s*).*" % key)
                match = regexp.search(line)
                if match:
                    line = "#define %s %s\n" % (key, val)
                    # replaced entries are dropped so only missing ones get appended below
                    macrodict.pop(key)
            sys.stdout.write(line)

        # add remaining config entries
        f = open(cfg_filename, "a")
        for (key, val) in macrodict.items():
            f.write("#define %s %s\n" % (key, val))
        f.close()

        f = open(cfg_filename, "r")
        self.log.debug("Contents of %s: %s" % (cfg_filename, f.read()))
        f.close()

        # configure
        try:
            os.chdir(self.cfg['start_dir'])
        except OSError, err:
            self.log.error("Failed to change to the build dir %s: %s" % (self.cfg['start_dir'], err))
def build_step(self):
    """Custom build procedure for TensorFlow.

    Patches Bazel's CROSSTOOL files so compilers/binutils from modules are used
    instead of hardcoded /usr/bin locations, then runs 'bazel build' and the
    generated build_pip_package script to produce the wheel.
    """
    # pre-create target installation directory
    mkdir(os.path.join(self.installdir, self.pylibdir), parents=True)

    binutils_root = get_software_root('binutils')
    if binutils_root:
        binutils_bin = os.path.join(binutils_root, 'bin')
    else:
        raise EasyBuildError("Failed to determine installation prefix for binutils")

    # prefer GCCcore (compiler-only module) over full GCC toolchain module
    gcc_root = get_software_root('GCCcore') or get_software_root('GCC')
    if gcc_root:
        gcc_lib64 = os.path.join(gcc_root, 'lib64')
        gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')

        # figure out location of GCC include files
        res = glob.glob(os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include'))
        if res and len(res) == 1:
            gcc_lib_inc = res[0]
        else:
            raise EasyBuildError("Failed to pinpoint location of GCC include files: %s", res)

        # make sure include-fixed directory is where we expect it to be
        gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed')
        if not os.path.exists(gcc_lib_inc_fixed):
            raise EasyBuildError("Derived directory %s does not exist", gcc_lib_inc_fixed)

        # also check on location of include/c++/<gcc version> directory
        gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver)
        if not os.path.exists(gcc_cplusplus_inc):
            raise EasyBuildError("Derived directory %s does not exist", gcc_cplusplus_inc)
    else:
        raise EasyBuildError("Failed to determine installation prefix for GCC")

    inc_paths = [gcc_lib_inc, gcc_lib_inc_fixed, gcc_cplusplus_inc]
    lib_paths = [gcc_lib64]

    cuda_root = get_software_root('CUDA')
    if cuda_root:
        inc_paths.append(os.path.join(cuda_root, 'include'))
        lib_paths.append(os.path.join(cuda_root, 'lib64'))

    # fix hardcoded locations of compilers & tools
    # two variants of the include-dir lines: with symlinks resolved and as-is
    cxx_inc_dir_lines = '\n'.join(r'cxx_builtin_include_directory: "%s"' % resolve_path(p) for p in inc_paths)
    cxx_inc_dir_lines_no_resolv_path = '\n'.join(r'cxx_builtin_include_directory: "%s"' % p for p in inc_paths)
    regex_subs = [
        (r'-B/usr/bin/', '-B%s/ %s' % (binutils_bin, ' '.join('-L%s/' % p for p in lib_paths))),
        # strip existing cxx_builtin_include_directory entries, then re-insert ours after 'toolchain {'
        (r'(cxx_builtin_include_directory:).*', ''),
        (r'^toolchain {', 'toolchain {\n' + cxx_inc_dir_lines + '\n' + cxx_inc_dir_lines_no_resolv_path),
    ]
    for tool in ['ar', 'cpp', 'dwp', 'gcc', 'gcov', 'ld', 'nm', 'objcopy', 'objdump', 'strip']:
        path = which(tool)
        if path:
            regex_subs.append((os.path.join('/usr', 'bin', tool), path))
        else:
            raise EasyBuildError("Failed to determine path to '%s'", tool)

    # -fPIE/-pie and -fPIC are not compatible, so patch out hardcoded occurences of -fPIE/-pie if -fPIC is used
    if self.toolchain.options.get('pic', None):
        regex_subs.extend([('-fPIE', '-fPIC'), ('"-pie"', '"-fPIC"')])

    # patch all CROSSTOOL* scripts to fix hardcoding of locations of binutils/GCC binaries
    for path, dirnames, filenames in os.walk(os.getcwd()):
        for filename in filenames:
            if filename.startswith('CROSSTOOL'):
                full_path = os.path.join(path, filename)
                self.log.info("Patching %s", full_path)
                apply_regex_substitutions(full_path, regex_subs)

    # use dedicated temp dirs so Bazel state does not leak between builds
    tmpdir = tempfile.mkdtemp(suffix='-bazel-build')
    user_root_tmpdir = tempfile.mkdtemp(suffix='-user_root')

    # compose "bazel build" command with all its options...
    cmd = [
        self.cfg['prebuildopts'],
        'bazel',
        '--output_base=%s' % tmpdir,
        '--install_base=%s' % os.path.join(tmpdir, 'inst_base'),
        '--output_user_root=%s' % user_root_tmpdir,
        'build'
    ]

    # build with optimization enabled
    # cfr. https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
    cmd.append('--compilation_mode=opt')

    # select 'opt' config section (this is *not* the same as --compilation_mode=opt!)
    # https://docs.bazel.build/versions/master/user-manual.html#flag--config
    cmd.append('--config=opt')

    # make Bazel print full command line + make it verbose on failures
    # https://docs.bazel.build/versions/master/user-manual.html#flag--subcommands
    # https://docs.bazel.build/versions/master/user-manual.html#flag--verbose_failures
    cmd.extend(['--subcommands', '--verbose_failures'])

    # limit the number of parallel jobs running simultaneously (useful on KNL)...
    cmd.append('--jobs=%s' % self.cfg['parallel'])

    if self.toolchain.options.get('pic', None):
        cmd.append('--copt="-fPIC"')

    # include install location of Python packages in $PYTHONPATH,
    # and specify that value of $PYTHONPATH should be passed down into Bazel build environment;
    # this is required to make sure that Python packages included as extensions are found at build time;
    # see also https://github.com/tensorflow/tensorflow/issues/22395
    pythonpath = os.getenv('PYTHONPATH', '')
    env.setvar('PYTHONPATH', '%s:%s' % (os.path.join(self.installdir, self.pylibdir), pythonpath))

    cmd.append('--action_env=PYTHONPATH')

    # use same configuration for both host and target programs, which can speed up the build
    # only done when optarch is enabled, since this implicitely assumes that host and target platform are the same
    # see https://docs.bazel.build/versions/master/guide.html#configurations
    if self.toolchain.options.get('optarch'):
        cmd.append('--distinct_host_configuration=false')

    cmd.append(self.cfg['buildopts'])

    if cuda_root:
        cmd.append('--config=cuda')

    # if mkl-dnn is listed as a dependency it is used. Otherwise downloaded if with_mkl_dnn is true
    mkl_root = get_software_root('mkl-dnn')
    if mkl_root:
        cmd.extend(['--config=mkl'])
        # environment exports must come before the bazel invocation in the shell command
        cmd.insert(0, "export TF_MKL_DOWNLOAD=0 &&")
        cmd.insert(0, "export TF_MKL_ROOT=%s &&" % mkl_root)
    elif self.cfg['with_mkl_dnn']:
        # this makes TensorFlow use mkl-dnn (cfr. https://github.com/01org/mkl-dnn)
        cmd.extend(['--config=mkl'])
        cmd.insert(0, "export TF_MKL_DOWNLOAD=1 && ")

    # specify target of the build command as last argument
    cmd.append('//tensorflow/tools/pip_package:build_pip_package')

    run_cmd(' '.join(cmd), log_all=True, simple=True, log_ok=True)

    # run generated 'build_pip_package' script to build the .whl
    cmd = "bazel-bin/tensorflow/tools/pip_package/build_pip_package %s" % self.builddir
    run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def configure_step(self):
    """Set DOLFIN-specific configure options and configure with CMake.

    Collects compiler/MPI settings and the locations of all required
    dependencies into CMake configopts, runs the CMake configure step, and
    fails if any optional package was reported as not found.
    """
    shlib_ext = get_shared_lib_ext()

    # compilers
    self.cfg.update('configopts', "-DCMAKE_C_COMPILER='%s' " % os.getenv('CC'))
    self.cfg.update('configopts', "-DCMAKE_CXX_COMPILER='%s' " % os.getenv('CXX'))
    self.cfg.update('configopts', "-DCMAKE_Fortran_COMPILER='%s' " % os.getenv('F90'))

    # compiler flags
    cflags = os.getenv('CFLAGS')
    cxxflags = os.getenv('CXXFLAGS')
    fflags = os.getenv('FFLAGS')

    # fix for "SEEK_SET is #defined but must not be for the C++ binding of MPI. Include mpi.h before stdio.h"
    if self.toolchain.mpi_family() in [toolchain.INTELMPI, toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2]:
        cflags += " -DMPICH_IGNORE_CXX_SEEK"
        cxxflags += " -DMPICH_IGNORE_CXX_SEEK"
        fflags += " -DMPICH_IGNORE_CXX_SEEK"

    self.cfg.update('configopts', '-DCMAKE_C_FLAGS="%s"' % cflags)
    self.cfg.update('configopts', '-DCMAKE_CXX_FLAGS="%s"' % cxxflags)
    self.cfg.update('configopts', '-DCMAKE_Fortran_FLAGS="%s"' % fflags)

    # run cmake in debug mode
    self.cfg.update('configopts', '-DCMAKE_BUILD_TYPE=Debug')

    # set correct compilers to be used at runtime
    self.cfg.update('configopts', '-DMPI_C_COMPILER="$MPICC"')
    self.cfg.update('configopts', '-DMPI_CXX_COMPILER="$MPICXX"')

    # specify MPI library
    self.cfg.update('configopts', '-DMPI_COMPILER="%s"' % os.getenv('MPICC'))

    if os.getenv('MPI_LIB_SHARED') and os.getenv('MPI_INC_DIR'):
        self.cfg.update('configopts', '-DMPI_LIBRARY="%s"' % os.getenv('MPI_LIB_SHARED'))
        self.cfg.update('configopts', '-DMPI_INCLUDE_PATH="%s"' % os.getenv('MPI_INC_DIR'))
    else:
        raise EasyBuildError("MPI_LIB_SHARED or MPI_INC_DIR not set, could not determine MPI-related paths.")

    # save config options to reuse them later (e.g. for sanity check commands)
    self.saved_configopts = self.cfg['configopts']

    # make sure that required dependencies are loaded
    deps = ['Boost', 'CGAL', 'MTL4', 'ParMETIS', 'PETSc', 'Python',
            'SCOTCH', 'Sphinx', 'SLEPc', 'SuiteSparse', 'Trilinos', 'zlib']
    # Armadillo was replaced by Eigen in v1.3
    if LooseVersion(self.version) < LooseVersion('1.3'):
        deps.append('Armadillo')
    else:
        deps.append('Eigen')

    # UFC has been integrated into FFC in v1.4, cfr. https://bitbucket.org/fenics-project/ufc-deprecated
    if LooseVersion(self.version) < LooseVersion('1.4'):
        deps.append('UFC')

    # PLY, petsc4py, slepc4py are required since v1.5
    if LooseVersion(self.version) >= LooseVersion('1.5'):
        deps.extend(['petsc4py', 'PLY', 'slepc4py'])

    depsdict = {}
    for dep in deps:
        deproot = get_software_root(dep)
        if not deproot:
            raise EasyBuildError("Dependency %s not available.", dep)
        else:
            depsdict.update({dep: deproot})

    # zlib
    self.cfg.update('configopts', '-DZLIB_INCLUDE_DIR=%s' % os.path.join(depsdict['zlib'], "include"))
    self.cfg.update('configopts', '-DZLIB_LIBRARY=%s' % os.path.join(depsdict['zlib'], "lib", "libz.a"))

    # set correct openmp options
    openmp = self.toolchain.get_flag('openmp')
    self.cfg.update('configopts', '-DOpenMP_CXX_FLAGS="%s"' % openmp)
    self.cfg.update('configopts', '-DOpenMP_C_FLAGS="%s"' % openmp)

    # Boost config parameters
    self.cfg.update('configopts', "-DBOOST_INCLUDEDIR=%s/include" % depsdict['Boost'])
    self.cfg.update('configopts', "-DBoost_DEBUG=ON -DBOOST_ROOT=%s" % depsdict['Boost'])
    self.boost_dir = depsdict['Boost']

    # UFC and Armadillo config params
    if 'UFC' in depsdict:
        self.cfg.update('configopts', "-DUFC_DIR=%s" % depsdict['UFC'])
    if 'Armadillo' in depsdict:
        self.cfg.update('configopts', "-DARMADILLO_DIR:PATH=%s " % depsdict['Armadillo'])

    # Eigen config params
    if 'Eigen' in depsdict:
        self.cfg.update('configopts', "-DEIGEN3_INCLUDE_DIR=%s " % os.path.join(depsdict['Eigen'], 'include'))

    # specify Python paths
    python = depsdict['Python']
    pyver = '.'.join(get_software_version('Python').split('.')[:2])
    self.cfg.update('configopts', "-DPYTHON_INCLUDE_PATH=%s/include/python%s" % (python, pyver))
    self.cfg.update('configopts', "-DPYTHON_LIBRARY=%s/lib/libpython%s.%s" % (python, pyver, shlib_ext))

    # SuiteSparse config params; paths reference the per-component subdirs of SuiteSparse
    suitesparse = depsdict['SuiteSparse']
    umfpack_params = [
        '-DUMFPACK_DIR="%(sp)s/UMFPACK"',
        '-DUMFPACK_INCLUDE_DIRS="%(sp)s/UMFPACK/include;%(sp)s/UFconfig"',
        '-DAMD_DIR="%(sp)s/UMFPACK"',
        '-DCHOLMOD_DIR="%(sp)s/CHOLMOD"',
        '-DCHOLMOD_INCLUDE_DIRS="%(sp)s/CHOLMOD/include;%(sp)s/UFconfig"',
        '-DUFCONFIG_DIR="%(sp)s/UFconfig"',
        '-DCAMD_LIBRARY:PATH="%(sp)s/CAMD/lib/libcamd.a"',
        '-DCCOLAMD_LIBRARY:PATH="%(sp)s/CCOLAMD/lib/libccolamd.a"',
        '-DCOLAMD_LIBRARY:PATH="%(sp)s/COLAMD/lib/libcolamd.a"'
    ]

    self.cfg.update('configopts', ' '.join(umfpack_params) % {'sp': suitesparse})

    # ParMETIS and SCOTCH
    self.cfg.update('configopts', '-DPARMETIS_DIR="%s"' % depsdict['ParMETIS'])
    self.cfg.update('configopts', '-DSCOTCH_DIR="%s" -DSCOTCH_DEBUG:BOOL=ON' % depsdict['SCOTCH'])

    # BLACS and LAPACK
    self.cfg.update('configopts', '-DBLAS_LIBRARIES:PATH="%s"' % os.getenv('LIBBLAS'))
    self.cfg.update('configopts', '-DLAPACK_LIBRARIES:PATH="%s"' % os.getenv('LIBLAPACK'))

    # CGAL
    self.cfg.update('configopts', '-DCGAL_DIR:PATH="%s"' % depsdict['CGAL'])

    # PETSc
    # need to specify PETSC_ARCH explicitely (env var alone is not sufficient)
    for env_var in ["PETSC_DIR", "PETSC_ARCH"]:
        val = os.getenv(env_var)
        if val:
            self.cfg.update('configopts', '-D%s=%s' % (env_var, val))

    # MTL4
    self.cfg.update('configopts', '-DMTL4_DIR:PATH="%s"' % depsdict['MTL4'])

    # configure
    out = super(EB_DOLFIN, self).configure_step()

    # make sure that all optional packages are found
    not_found_re = re.compile("The following optional packages could not be found")
    if not_found_re.search(out):
        raise EasyBuildError("Optional packages could not be found, this should not happen...")

    # enable verbose build, so we have enough information if something goes wrong
    self.cfg.update('buildopts', "VERBOSE=1")
def configure_step(self):
    """Custom configuration procedure for Quantum ESPRESSO.

    Builds up the list of preprocessor DFLAGS and of make.sys/make.inc
    replacements from the toolchain and loaded dependencies (libxc, HDF5,
    ELPA, FFT, ScaLAPACK), runs the base configure step, then patches the
    generated make file(s) and the build files of bundled plugins
    (wannier90, want), and finally relocates non-espresso plugin
    directories into the main source tree.

    Raises EasyBuildError for unsupported toolchains, too-old dependency
    versions, inconsistent options, or patching failures.
    """
    # compose list of DFLAGS (flag, value, keep_stuff)
    # for guidelines, see include/defs.h.README in sources
    dflags = []
    repls = []
    extra_libs = []

    comp_fam_dflags = {
        toolchain.INTELCOMP: '-D__INTEL',
        toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
    }
    comp_fam = self.toolchain.comp_family()
    if comp_fam in comp_fam_dflags:
        dflags.append(comp_fam_dflags[comp_fam])
    else:
        raise EasyBuildError("EasyBuild does not yet have support for QuantumESPRESSO with toolchain %s" % comp_fam)

    if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
        self.cfg.update('configopts', '--enable-openmp')
        dflags.append(" -D__OPENMP")

    if self.toolchain.options.get('usempi', None):
        dflags.append('-D__MPI -D__PARA')
    else:
        self.cfg.update('configopts', '--disable-parallel')

    if self.cfg['with_scalapack']:
        dflags.append(" -D__SCALAPACK")
        if self.toolchain.options.get('usempi', None):
            # only the Intel MPI + MKL combination gets the 'intel' ScaLAPACK flavor
            if get_software_root("impi") and get_software_root("imkl"):
                self.cfg.update('configopts', '--with-scalapack=intel')
    else:
        self.cfg.update('configopts', '--without-scalapack')

    libxc = get_software_root("libxc")
    if libxc:
        libxc_v = get_software_version("libxc")
        if LooseVersion(libxc_v) < LooseVersion("3.0.1"):
            raise EasyBuildError("Must use libxc >= 3.0.1")
        dflags.append(" -D__LIBXC")
        repls.append(('IFLAGS', '-I%s' % os.path.join(libxc, 'include'), True))
        extra_libs.append(" -lxcf90 -lxc")

    hdf5 = get_software_root("HDF5")
    if hdf5:
        self.cfg.update('configopts', '--with-hdf5=%s' % hdf5)
        dflags.append(" -D__HDF5")
        hdf5_lib_repl = '-L%s/lib -lhdf5hl_fortran -lhdf5_hl -lhdf5_fortran -lhdf5 -lsz -lz -ldl -lm' % hdf5
        repls.append(('HDF5_LIB', hdf5_lib_repl, False))

    elpa = get_software_root("ELPA")
    if elpa:
        if not self.cfg['with_scalapack']:
            raise EasyBuildError("ELPA requires ScaLAPACK but 'with_scalapack' is set to False")

        elpa_v = get_software_version("ELPA")

        # minimum ELPA version (and define) depends on the QE major version
        if LooseVersion(self.version) >= LooseVersion("6"):
            elpa_min_ver = "2016.11.001.pre"
            dflags.append('-D__ELPA_2016')
        else:
            elpa_min_ver = "2015"
            dflags.append('-D__ELPA_2015 -D__ELPA')

        if LooseVersion(elpa_v) < LooseVersion(elpa_min_ver):
            # fix: the original concatenated two string literals with '+' but applied the
            # '%' formatting (which binds tighter) to only the second literal, so raising
            # this error itself crashed with a TypeError; use EasyBuildError's lazy
            # %-style arguments instead (as done elsewhere in this file)
            raise EasyBuildError("QuantumESPRESSO %s needs ELPA to be version %s or newer",
                                 self.version, elpa_min_ver)

        if self.toolchain.options.get('openmp', False):
            elpa_include = 'elpa_openmp-%s' % elpa_v
            elpa_lib = 'libelpa_openmp.a'
        else:
            elpa_include = 'elpa-%s' % elpa_v
            elpa_lib = 'libelpa.a'
        elpa_include = os.path.join(elpa, 'include', elpa_include)
        repls.append(('IFLAGS', '-I%s' % os.path.join(elpa_include, 'modules'), True))
        self.cfg.update('configopts', '--with-elpa-include=%s' % elpa_include)
        elpa_lib = os.path.join(elpa, 'lib', elpa_lib)
        self.cfg.update('configopts', '--with-elpa-lib=%s' % elpa_lib)

    if comp_fam == toolchain.INTELCOMP:
        # set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
        cpp = "%s -E -C" % os.getenv('CC')
        repls.append(('CPP', cpp, False))
        env.setvar('CPP', cpp)
        # also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
        env.setvar('FCCPP', "%s -E" % os.getenv('CC'))

    if comp_fam == toolchain.INTELCOMP:
        # Intel compiler must have -assume byterecl (see install/configure)
        repls.append(('F90FLAGS', '-fpp -assume byterecl', True))
        repls.append(('FFLAGS', '-assume byterecl', True))
    elif comp_fam == toolchain.GCC:
        repls.append(('F90FLAGS', '-cpp', True))

    super(EB_QuantumESPRESSO, self).configure_step()

    # select the FFT library matching the (possibly multi-threaded) toolchain
    if self.toolchain.options.get('openmp', False):
        libfft = os.getenv('LIBFFT_MT')
    else:
        libfft = os.getenv('LIBFFT')
    if libfft:
        if "fftw3" in libfft:
            dflags.append('-D__FFTW3')
        else:
            dflags.append('-D__FFTW')
        env.setvar('FFTW_LIBS', libfft)

    if get_software_root('ACML'):
        dflags.append('-D__ACML')

    if self.cfg['with_ace']:
        dflags.append(" -D__EXX_ACE")

    # always include -w to suppress warnings
    dflags.append('-w')

    repls.append(('DFLAGS', ' '.join(dflags), False))

    # complete C/Fortran compiler and LD flags
    if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
        repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
        repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))

    # obtain library settings
    libs = []
    num_libs = ['BLAS', 'LAPACK', 'FFT']
    if self.cfg['with_scalapack']:
        num_libs.extend(['SCALAPACK'])
    for lib in num_libs:
        if self.toolchain.options.get('openmp', False):
            val = os.getenv('LIB%s_MT' % lib)
        else:
            val = os.getenv('LIB%s' % lib)
        if lib == 'SCALAPACK' and elpa:
            # ELPA static lib must come before the ScaLAPACK libraries
            val = ' '.join([elpa_lib, val])
        repls.append(('%s_LIBS' % lib, val, False))
        libs.append(val)
    libs = ' '.join(libs)

    repls.append(('BLAS_LIBS_SWITCH', 'external', False))
    repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
    repls.append(('LD_LIBS', ' '.join(extra_libs + [os.getenv('LIBS')]), False))

    # Do not use external FoX.
    # FoX starts to be used in 6.2 and they use a patched version that
    # is newer than FoX 4.1.2 which is the latest release.
    # Ake Sandgren, 20180712
    if get_software_root('FoX'):
        # fix: added missing space between the two concatenated message parts
        raise EasyBuildError("Found FoX external module, QuantumESPRESSO " +
                             "must use the version they include with the source.")

    self.log.debug("List of replacements to perform: %s" % repls)

    if LooseVersion(self.version) >= LooseVersion("6"):
        make_ext = '.inc'
    else:
        make_ext = '.sys'

    # patch make.sys file
    fn = os.path.join(self.cfg['start_dir'], 'make' + make_ext)
    try:
        for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
            for (k, v, keep) in repls:
                # need to use [ \t]* instead of \s*, because vars may be undefined as empty,
                # and we don't want to include newlines
                if keep:
                    line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
                else:
                    line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)

            # fix preprocessing directives for .f90 files in make.sys if required
            if LooseVersion(self.version) < LooseVersion("6.0"):
                if comp_fam == toolchain.GCC:
                    line = re.sub(r"^\t\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
                                  "\t$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
                                  "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o", line)

            sys.stdout.write(line)
    except IOError as err:
        raise EasyBuildError("Failed to patch %s: %s", fn, err)

    self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))

    # patch default make.sys for wannier
    if LooseVersion(self.version) >= LooseVersion("5"):
        fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90' + make_ext)
    else:
        fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
    try:
        for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
            line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
            sys.stdout.write(line)
    except IOError as err:
        raise EasyBuildError("Failed to patch %s: %s", fn, err)

    self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))

    # patch Makefile of want plugin
    wantprefix = 'want-'
    wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]

    if len(wantdirs) > 1:
        raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)

    if len(wantdirs) != 0:
        wantdir = os.path.join(self.builddir, wantdirs[0])
        make_sys_in_path = None
        cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
        for path in cand_paths:
            full_path = os.path.join(wantdir, path)
            if os.path.exists(full_path):
                make_sys_in_path = full_path
                break
        if make_sys_in_path is None:
            raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
                                 wantdir, ', '.join(cand_paths))

        try:
            for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
                # fix preprocessing directives for .f90 files in make.sys if required
                if comp_fam == toolchain.GCC:
                    line = re.sub("@f90rule@",
                                  "$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
                                  "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o", line)
                sys.stdout.write(line)
        except IOError as err:
            raise EasyBuildError("Failed to patch %s: %s", fn, err)

    # move non-espresso directories to where they're expected and create symlinks
    try:
        dirnames = [d for d in os.listdir(self.builddir) if d not in [self.install_subdir, 'd3q-latest']]
        targetdir = os.path.join(self.builddir, self.install_subdir)
        for dirname in dirnames:
            shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
            self.log.info("Moved %s into %s" % (dirname, targetdir))

            dirname_head = dirname.split('-')[0]
            # Handle the case where the directory is preceded by 'qe-'
            if dirname_head == 'qe':
                dirname_head = dirname.split('-')[1]
            linkname = None
            if dirname_head == 'sax':
                linkname = 'SaX'
            if dirname_head == 'wannier90':
                linkname = 'W90'
            elif dirname_head in ['d3q', 'gipaw', 'plumed', 'want', 'yambo']:
                linkname = dirname_head.upper()
            if linkname:
                os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
    except OSError as err:
        raise EasyBuildError("Failed to move non-espresso directories: %s", err)
def configure_common(self):
    """Common configuration for all toolchains.

    Returns a dict of make-file options (compiler commands, flags, DFLAGS,
    libraries) shared by every CP2K toolchain, extended with LibInt and
    libxc settings when those dependencies are loaded.
    """
    # openmp introduces 2 major differences
    # -automatic is default: -noautomatic -auto-scalar
    # some mem-bandwidth optimisation
    if self.cfg['type'] == 'psmp':
        self.openmp = self.toolchain.get_flag('openmp')

    # determine which opt flags to use
    if self.cfg['typeopt']:
        optflags = 'OPT'
        regflags = 'OPT2'
    else:
        optflags = 'NOOPT'
        regflags = 'NOOPT'

    # make sure a MPI-2 able MPI lib is used
    mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI']
    mpi2 = False
    for mpi2lib in mpi2libs:
        if get_software_root(mpi2lib):
            mpi2 = True
        else:
            # fix: the %s placeholder was never filled in (log call had no argument)
            self.log.debug("MPI-2 supporting MPI library %s not loaded." % mpi2lib)
    if not mpi2:
        self.log.error("CP2K needs MPI-2, no known MPI-2 supporting library loaded?")

    options = {
        'CC': os.getenv('MPICC'),
        'CPP': '',
        'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'AR': 'ar -r',
        'CPPFLAGS': '',

        'FPIC': self.fpic,
        'DEBUG': self.debug,

        'FCFLAGS': '$(FCFLAGS%s)' % optflags,
        'FCFLAGS2': '$(FCFLAGS%s)' % regflags,

        'CFLAGS': ' %s %s $(FPIC) $(DEBUG) %s ' % (os.getenv('EBVARCPPFLAGS'), os.getenv('EBVARLDFLAGS'),
                                                   self.cfg['extracflags']),
        'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'],

        'LIBS': os.getenv('LIBS'),

        'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)',
        'FCFLAGSOPT': '-O2 $(FREE) $(SAFE) $(FPIC) $(DEBUG)',
        'FCFLAGSOPT2': '-O1 $(FREE) $(SAFE) $(FPIC) $(DEBUG)'
    }

    libint = get_software_root('LibInt')
    if libint:
        options['DFLAGS'] += ' -D__LIBINT'

        libintcompiler = "%s %s" % (os.getenv('CC'), os.getenv('CFLAGS'))

        # Build libint-wrapper, if required
        libint_wrapper = ''

        # required for old versions of GCC
        if not self.compilerISO_C_BINDING:
            options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING'

            # determine path for libint_tools dir
            libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools']
            libinttools_path = None
            for path in libinttools_paths:
                path = os.path.join(self.cfg['start_dir'], path)
                if os.path.isdir(path):
                    libinttools_path = path
                    os.chdir(libinttools_path)
            if not libinttools_path:
                self.log.error("No libinttools dir found")

            # build libint wrapper
            cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint)
            if not run_cmd(cmd, log_all=True, simple=True):
                self.log.error("Building the libint wrapper failed")
            libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path

        # determine LibInt libraries based on major version number
        libint_maj_ver = get_software_version('LibInt').split('.')[0]
        if libint_maj_ver == '1':
            libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a"
        elif libint_maj_ver == '2':
            libint_libs = "$(LIBINTLIB)/libint2.a"
        else:
            self.log.error("Don't know how to handle libint version %s" % libint_maj_ver)
        self.log.info("Using LibInt version %s" % (libint_maj_ver))

        options['LIBINTLIB'] = '%s/lib' % libint
        options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper)
    else:
        # throw a warning, since CP2K without LibInt doesn't make much sense
        self.log.warning("LibInt module not loaded, so building without LibInt support")

    libxc = get_software_root('libxc')
    if libxc:
        cur_libxc_version = get_software_version('libxc')
        if LooseVersion(cur_libxc_version) < LooseVersion(LIBXC_MIN_VERSION):
            self.log.error("CP2K only works with libxc v%s (or later)" % LIBXC_MIN_VERSION)

        options['DFLAGS'] += ' -D__LIBXC2'
        # libxc >= 2.2 ships a separate Fortran 90 interface library
        if LooseVersion(cur_libxc_version) >= LooseVersion('2.2'):
            options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc
        else:
            options['LIBS'] += ' -L%s/lib -lxc' % libxc
        self.log.info("Using Libxc-%s" % cur_libxc_version)
    else:
        self.log.info("libxc module not loaded, so building without libxc support")

    return options
def configure_step(self):
    """Set DOLFIN-specific configure options and configure with CMake.

    Collects compiler/MPI settings from the toolchain environment and
    passes install prefixes of dependencies (zlib, Boost, UFC, Armadillo,
    Eigen, Python, SuiteSparse, ParMETIS, SCOTCH, CGAL, PETSc, MTL4) to
    CMake via $configopts, then delegates to the parent configure step and
    verifies that no optional package was reported missing.
    """
    shlib_ext = get_shared_lib_ext()

    # compilers
    self.cfg.update('configopts', "-DCMAKE_C_COMPILER='%s' " % os.getenv('CC'))
    self.cfg.update('configopts', "-DCMAKE_CXX_COMPILER='%s' " % os.getenv('CXX'))
    self.cfg.update('configopts', "-DCMAKE_Fortran_COMPILER='%s' " % os.getenv('F90'))

    # compiler flags
    # NOTE(review): assumes $CFLAGS/$CXXFLAGS/$FFLAGS are set in the environment
    # (normally done by the toolchain); '+=' below would fail on None — confirm
    cflags = os.getenv('CFLAGS')
    cxxflags = os.getenv('CXXFLAGS')
    fflags = os.getenv('FFLAGS')

    # fix for "SEEK_SET is #defined but must not be for the C++ binding of MPI. Include mpi.h before stdio.h"
    if self.toolchain.mpi_family() in [toolchain.INTELMPI, toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2]:
        cflags += " -DMPICH_IGNORE_CXX_SEEK"
        cxxflags += " -DMPICH_IGNORE_CXX_SEEK"
        fflags += " -DMPICH_IGNORE_CXX_SEEK"

    self.cfg.update('configopts', '-DCMAKE_C_FLAGS="%s"' % cflags)
    self.cfg.update('configopts', '-DCMAKE_CXX_FLAGS="%s"' % cxxflags)
    self.cfg.update('configopts', '-DCMAKE_Fortran_FLAGS="%s"' % fflags)

    # run cmake in debug mode
    self.cfg.update('configopts', '-DCMAKE_BUILD_TYPE=Debug')

    # set correct compilers to be used at runtime
    self.cfg.update('configopts', '-DMPI_C_COMPILER="$MPICC"')
    self.cfg.update('configopts', '-DMPI_CXX_COMPILER="$MPICXX"')

    # specify MPI library
    self.cfg.update('configopts', '-DMPI_COMPILER="%s"' % os.getenv('MPICC'))

    if os.getenv('MPI_LIB_SHARED') and os.getenv('MPI_INC_DIR'):
        self.cfg.update('configopts', '-DMPI_LIBRARY="%s"' % os.getenv('MPI_LIB_SHARED'))
        self.cfg.update('configopts', '-DMPI_INCLUDE_PATH="%s"' % os.getenv('MPI_INC_DIR'))
    else:
        raise EasyBuildError("MPI_LIB_SHARED or MPI_INC_DIR not set, could not determine MPI-related paths.")

    # save config options to reuse them later (e.g. for sanity check commands)
    self.saved_configopts = self.cfg['configopts']

    # populates self.depsdict with dependency name -> install prefix entries
    self.get_deps()

    # zlib
    self.cfg.update('configopts', '-DZLIB_INCLUDE_DIR=%s' % os.path.join(self.depsdict['zlib'], "include"))
    self.cfg.update('configopts', '-DZLIB_LIBRARY=%s' % os.path.join(self.depsdict['zlib'], "lib", "libz.a"))

    # set correct openmp options
    openmp = self.toolchain.get_flag('openmp')
    self.cfg.update('configopts', '-DOpenMP_CXX_FLAGS="%s"' % openmp)
    self.cfg.update('configopts', '-DOpenMP_C_FLAGS="%s"' % openmp)

    # Boost config parameters
    self.cfg.update('configopts', "-DBOOST_INCLUDEDIR=%s/include" % self.depsdict['Boost'])
    self.cfg.update('configopts', "-DBoost_DEBUG=ON -DBOOST_ROOT=%s" % self.depsdict['Boost'])

    # UFC and Armadillo config params (optional dependencies)
    if 'UFC' in self.depsdict:
        self.cfg.update('configopts', "-DUFC_DIR=%s" % self.depsdict['UFC'])
    if 'Armadillo' in self.depsdict:
        self.cfg.update('configopts', "-DARMADILLO_DIR:PATH=%s " % self.depsdict['Armadillo'])

    # Eigen config params
    if 'Eigen' in self.depsdict:
        self.cfg.update('configopts', "-DEIGEN3_INCLUDE_DIR=%s " % os.path.join(self.depsdict['Eigen'], 'include'))

    # specify Python paths
    (outtxt, _) = run_cmd("which python", log_all=True)
    # Check if the scipy stack includes the interpreter
    # ('which python' resolving to a SciPy-Stack install path means the
    # interpreter is provided by that module rather than a Python module)
    python_in_scipy = re.search("SciPy-Stack", outtxt)
    if python_in_scipy:
        python = self.depsdict['SciPy-Stack']
        # ask the interpreter itself for its version (major.minor)
        (outtxt, _) = run_cmd("python --version 2>&1 | awk '{print $2}'", log_all=True)
        pyver = '.'.join(outtxt.split('.')[:2])
    else:
        python = self.depsdict['Python']
        pyver = '.'.join(get_software_version('Python').split('.')[:2])
    self.cfg.update('configopts', "-DPYTHON_INCLUDE_PATH=%s/include/python%s" % (python, pyver))
    if pyver.split('.')[0] == '2':
        self.cfg.update('configopts', "-DPYTHON_LIBRARY=%s/lib/libpython%s.%s" % (python, pyver, shlib_ext))
    else:
        # NOTE(review): the 'm' ABI suffix only exists for CPython 3.x < 3.8 — confirm for newer Pythons
        self.cfg.update('configopts', "-DPYTHON_LIBRARY=%s/lib/libpython%sm.%s" % (python, pyver, shlib_ext))

    # SuiteSparse config params
    suitesparse = self.depsdict['SuiteSparse']
    umfpack_params = [
        '-DUMFPACK_DIR="%(sp)s/UMFPACK"',
        '-DUMFPACK_INCLUDE_DIRS="%(sp)s/UMFPACK/include;%(sp)s/UFconfig"',
        '-DAMD_DIR="%(sp)s/UMFPACK"',
        '-DCHOLMOD_DIR="%(sp)s/CHOLMOD"',
        '-DCHOLMOD_INCLUDE_DIRS="%(sp)s/CHOLMOD/include;%(sp)s/UFconfig"',
        '-DUFCONFIG_DIR="%(sp)s/UFconfig"',
        '-DCAMD_LIBRARY:PATH="%(sp)s/CAMD/lib/libcamd.a"',
        '-DCCOLAMD_LIBRARY:PATH="%(sp)s/CCOLAMD/lib/libccolamd.a"',
        '-DCOLAMD_LIBRARY:PATH="%(sp)s/COLAMD/lib/libcolamd.a"'
    ]
    self.cfg.update('configopts', ' '.join(umfpack_params) % {'sp':suitesparse})

    # ParMETIS and SCOTCH
    if 'ParMETIS' in self.depsdict:
        self.cfg.update('configopts', '-DPARMETIS_DIR="%s"' % self.depsdict['ParMETIS'])
    elif 'PETSc' in self.depsdict:
        # We'll try our luck and see if PETSc has ParMETIS included
        self.cfg.update('configopts', '-DPARMETIS_DIR="%s"' % self.depsdict['PETSc'])
    self.cfg.update('configopts', '-DSCOTCH_DIR="%s" -DSCOTCH_DEBUG:BOOL=ON' % self.depsdict['SCOTCH'])

    # BLACS and LAPACK
    self.cfg.update('configopts', '-DBLAS_LIBRARIES:PATH="%s"' % os.getenv('LIBBLAS'))
    self.cfg.update('configopts', '-DLAPACK_LIBRARIES:PATH="%s"' % os.getenv('LIBLAPACK'))

    # CGAL
    self.cfg.update('configopts', '-DCGAL_DIR:PATH="%s"' % self.depsdict['CGAL'])

    if 'PETSc' in self.depsdict:
        # PETSc
        # need to specify PETSC_ARCH explicitely (env var alone is not sufficient)
        for env_var in ["PETSC_DIR", "PETSC_ARCH"]:
            val = os.getenv(env_var)
            if val:
                self.cfg.update('configopts', '-D%s=%s' % (env_var, val))

    if 'MTL4' in self.depsdict:
        # MTL4
        self.cfg.update('configopts', '-DMTL4_DIR:PATH="%s"' % self.depsdict['MTL4'])

    # configure
    out = super(EB_DOLFIN, self).configure_step()

    # make sure that all optional packages are found
    not_found_re = re.compile("The following optional packages could not be found")
    if not_found_re.search(out):
        raise EasyBuildError("Optional packages could not be found, this should not happen...")

    # enable verbose build, so we have enough information if something goes wrong
    self.cfg.update('buildopts', "VERBOSE=1")
def configure_step(self):
    """Configure build:
    - set required environment variables (for netCDF, JasPer)
    - patch compile script and ungrib Makefile for non-default install paths of WRF and JasPer
    - run configure script and figure how to select desired build option
    - patch configure.wps file afterwards to fix 'serial compiler' setting
    """
    # netCDF dependency check + setting env vars (NETCDF, NETCDFF)
    set_netcdf_env_vars(self.log)

    # WRF dependency check
    wrf = get_software_root('WRF')
    if wrf:
        wrfdir = os.path.join(wrf, det_wrf_subdir(get_software_version('WRF')))
    else:
        raise EasyBuildError("WRF module not loaded?")

    # patch compile script so that WRF is found
    # (replace the hardcoded ${DEV_TOP} prefix with the actual WRF install dir)
    self.compile_script = "compile"
    regex_subs = [(r"^(\s*set\s*WRF_DIR_PRE\s*=\s*)\${DEV_TOP}(.*)$", r"\1%s\2" % wrfdir)]
    apply_regex_substitutions(self.compile_script, regex_subs)

    # libpng dependency check
    libpng = get_software_root('libpng')
    zlib = get_software_root('zlib')
    if libpng:
        paths = [libpng]
        if zlib:
            # zlib include/lib dirs go first on the command line
            paths.insert(0, zlib)
        libpnginc = ' '.join(['-I%s' % os.path.join(path, 'include') for path in paths])
        libpnglib = ' '.join(['-L%s' % os.path.join(path, 'lib') for path in paths])
    else:
        # define these as empty, assume that libpng will be available via OS (e.g. due to --filter-deps=libpng)
        libpnglib = ""
        libpnginc = ""

    # JasPer dependency check + setting env vars
    jasper = get_software_root('JasPer')
    if jasper:
        env.setvar('JASPERINC', os.path.join(jasper, "include"))
        jasperlibdir = os.path.join(jasper, "lib")
        env.setvar('JASPERLIB', jasperlibdir)
        jasperlib = "-L%s" % jasperlibdir
    else:
        raise EasyBuildError("JasPer module not loaded?")

    # patch ungrib Makefile so that JasPer is found
    jasperlibs = "%s -ljasper %s -lpng" % (jasperlib, libpnglib)
    regex_subs = [
        (r"^(\s*-L\.\s*-l\$\(LIBTARGET\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
        (r"^(\s*\$\(COMPRESSION_LIBS\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
    ]
    apply_regex_substitutions(os.path.join('ungrib', 'src', 'Makefile'), regex_subs)

    # patch arch/Config.pl script, so that run_cmd_qa receives all output to answer questions
    patch_perl_script_autoflush(os.path.join("arch", "Config.pl"))

    # configure

    # determine build type option to look for
    # NOTE(review): the option strings below must match the menu text printed by
    # WPS's interactive configure script; they differ per WPS version and compiler
    self.comp_fam = self.toolchain.comp_family()
    build_type_option = None

    if LooseVersion(self.version) >= LooseVersion("3.4"):
        knownbuildtypes = {'smpar': 'serial', 'dmpar': 'dmpar'}
        if self.comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            build_type_option = " Linux x86_64, Intel compiler"
        elif self.comp_fam == toolchain.GCC:  # @UndefinedVariable
            build_type_option = "Linux x86_64 g95 compiler"
        else:
            raise EasyBuildError("Don't know how to figure out build type to select.")
    else:
        knownbuildtypes = {'smpar': 'serial', 'dmpar': 'DM parallel'}
        if self.comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            build_type_option = "PC Linux x86_64, Intel compiler"
        elif self.comp_fam == toolchain.GCC:  # @UndefinedVariable
            build_type_option = "PC Linux x86_64, gfortran compiler,"
            knownbuildtypes['dmpar'] = knownbuildtypes['dmpar'].upper()
        else:
            raise EasyBuildError("Don't know how to figure out build type to select.")

    # check and fetch selected build type
    bt = self.cfg['buildtype']

    if bt not in knownbuildtypes.keys():
        raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, knownbuildtypes.keys())

    # fetch option number based on build type option and selected build type
    # (regex over the configure menu; the 'nr' group captures the option number)
    build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(?%s\)?\s*\n" % (build_type_option, knownbuildtypes[bt])

    cmd = "./configure"
    qa = {}
    no_qa = [".*compiler is.*"]
    std_qa = {
        # named group in match will be used to construct answer
        r"%s(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
    }

    run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    # make sure correct compilers and compiler flags are being used
    comps = {
        'SCC': "%s -I$(JASPERINC) %s" % (os.getenv('CC'), libpnginc),
        'SFC': os.getenv('F90'),
        'DM_FC': os.getenv('MPIF90'),
        'DM_CC': os.getenv('MPICC'),
        'FC': os.getenv('MPIF90'),
        'CC': os.getenv('MPICC'),
    }
    regex_subs = [(r"^(%s\s*=\s*).*$" % key, r"\1 %s" % val) for (key, val) in comps.items()]
    apply_regex_substitutions('configure.wps', regex_subs)
def prepare_step(self):
    """Custom prepare step for Tau: check required dependencies and collect information on them.

    Populates self.backend_opts, self.cc/self.cxx/self.fortran,
    self.mpi_inc_dir/self.mpi_lib_dir, self.opt_pkgs_opts and
    self.variant_labels, and pre-creates the install directory.
    """
    super(EB_TAU, self).prepare_step()

    # install prefixes for selected backends
    self.backend_opts = {'tau': ''}
    for backend_name, dep in KNOWN_BACKENDS.items():
        root = get_software_root(dep)
        if backend_name in self.cfg['extra_backends']:
            if root:
                self.backend_opts[backend_name] = "-%s=%s" % (backend_name, root)
            else:
                raise EasyBuildError("%s is listed in extra_backends, but not available as a dependency", dep)
        elif root:
            raise EasyBuildError("%s included as dependency, but '%s' not in extra_backends", dep, backend_name)

    # make sure Scalasca v1.x is used as a dependency (if it's there)
    if 'scalasca' in self.backend_opts and get_software_version('Scalasca').split('.')[0] != '1':
        raise EasyBuildError("Scalasca v1.x must be used when scalasca backend is enabled")

    # determine values for compiler flags to use
    known_compilers = {
        toolchain.CLANGGCC: ['clang', 'clang++', 'gfortran'],
        toolchain.GCC: ['gcc', 'g++', 'gfortran'],
        toolchain.INTELCOMP: ['icc', 'icpc', 'intel'],
    }
    comp_fam = self.toolchain.comp_family()
    if comp_fam in known_compilers:
        self.cc, self.cxx, self.fortran = known_compilers[comp_fam]
    else:
        # fix: previously there was no else branch here, so self.cc/self.cxx/self.fortran
        # were left unset for unknown compiler families, causing an AttributeError when
        # self.cxx is used below; fail early with a clear error instead
        raise EasyBuildError("Compiler family not supported yet: %s", comp_fam)

    # determine values for MPI flags
    self.mpi_inc_dir, self.mpi_lib_dir = os.getenv('MPI_INC_DIR'), os.getenv('MPI_LIB_DIR')

    # determine value for optional packages option template
    self.opt_pkgs_opts = ''
    for dep, opt in [('PAPI', 'papi'), ('PDT', 'pdt'), ('binutils', 'bfd')]:
        root = get_software_root(dep)
        if root:
            self.opt_pkgs_opts += ' -%s=%s' % (opt, root)

    # determine list of labels, based on selected (extra) backends, variants and optional packages
    self.variant_labels = []
    backend_labels = ['', '-epilog-scalasca-trace', '-scorep', '-vampirtrace-trace']
    for backend, backend_label in zip(['tau'] + sorted(KNOWN_BACKENDS.keys()), backend_labels):
        if backend in ['tau'] + self.cfg['extra_backends']:
            for pref, suff in [('-mpi', ''), ('', '-openmp-opari'), ('-mpi', '-openmp-opari')]:
                variant_label = 'tau'
                # For non-GCC builds, the compiler name is encoded in the variant
                if self.cxx and self.cxx != 'g++':
                    variant_label += '-' + self.cxx
                if get_software_root('PAPI'):
                    variant_label += '-papi'
                variant_label += pref
                if get_software_root('PDT'):
                    variant_label += '-pdt'
                variant_label += suff + backend_label
                self.variant_labels.append(variant_label)

    # create install directory and make sure it does not get cleaned up again in the install step;
    # the first configure iteration already puts things in place in the install directory,
    # so that shouldn't get cleaned up afterwards...
    self.log.info("Creating install dir %s before starting configure-build-install iterations", self.installdir)
    super(EB_TAU, self).make_installdir()
def configure_step(self):
    """Configure GAMESS-US build via provided interactive 'config' script.

    Determines machine type, Fortran compiler/version, math library, DDI
    communication layer and MPI library from the toolchain and loaded
    dependencies, drives the interactive 'config' script via Q&A, and then
    patches the generated 'rungms' launcher script.
    """
    # machine type
    platform_name = get_platform_name()
    x86_64_linux_re = re.compile('^x86_64-.*$')
    if x86_64_linux_re.match(platform_name):
        machinetype = "linux64"
    else:
        raise EasyBuildError("Build target %s currently unsupported", platform_name)

    # compiler config
    comp_fam = self.toolchain.comp_family()
    fortran_comp, fortran_ver = None, None
    if comp_fam == toolchain.INTELCOMP:
        fortran_comp = 'ifort'
        (out, _) = run_cmd("ifort -v", simple=False)
        res = re.search(r"^ifort version ([0-9]+)\.[0-9.]+$", out)
        if res:
            fortran_ver = res.group(1)
        else:
            raise EasyBuildError("Failed to determine ifort major version number")
    elif comp_fam == toolchain.GCC:
        fortran_comp = 'gfortran'
        fortran_ver = '.'.join(get_software_version('GCC').split('.')[:2])
    else:
        raise EasyBuildError("Compiler family '%s' currently unsupported.", comp_fam)

    # math library config
    known_mathlibs = ['imkl', 'OpenBLAS', 'ATLAS', 'ACML']
    mathlib, mathlib_root = None, None
    for mathlib in known_mathlibs:
        mathlib_root = get_software_root(mathlib)
        if mathlib_root is not None:
            break
    if mathlib_root is None:
        raise EasyBuildError("None of the known math libraries (%s) available, giving up.", known_mathlibs)
    if mathlib == 'imkl':
        mathlib = 'mkl'
        mathlib_root = os.path.join(mathlib_root, 'mkl')
    else:
        mathlib = mathlib.lower()

    # verify selected DDI communication layer
    known_ddi_comms = ['mpi', 'mixed', 'shmem', 'sockets']
    if not self.cfg['ddi_comm'] in known_ddi_comms:
        raise EasyBuildError("Unsupported DDI communication layer specified (known: %s): %s",
                             known_ddi_comms, self.cfg['ddi_comm'])

    # MPI library config
    mpilib, mpilib_root, mpilib_path = None, None, None
    if self.cfg['ddi_comm'] == 'mpi':
        known_mpilibs = ['impi', 'OpenMPI', 'MVAPICH2', 'MPICH2']
        for mpilib in known_mpilibs:
            mpilib_root = get_software_root(mpilib)
            if mpilib_root is not None:
                break
        if mpilib_root is None:
            raise EasyBuildError("None of the known MPI libraries (%s) available, giving up.", known_mpilibs)
        mpilib_path = mpilib_root
        if mpilib == 'impi':
            mpilib_path = os.path.join(mpilib_root, 'intel64')
        else:
            mpilib = mpilib.lower()

    # run interactive 'config' script to generate install.info file
    cmd = "%(preconfigopts)s ./config %(configopts)s" % {
        'preconfigopts': self.cfg['preconfigopts'],
        'configopts': self.cfg['configopts'],
    }
    qa = {
        "After the new window is open, please hit <return> to go on.": '',
        "please enter your target machine name: ": machinetype,
        "Version? [00] ": self.version,
        "Please enter your choice of FORTRAN: ": fortran_comp,
        "hit <return> to continue to the math library setup.": '',
        "MKL pathname? ": mathlib_root,
        "MKL version (or 'skip')? ": 'skip',
        "please hit <return> to compile the GAMESS source code activator": '',
        "please hit <return> to set up your network for Linux clusters.": '',
        "communication library ('sockets' or 'mpi')? ": self.cfg['ddi_comm'],
        "Enter MPI library (impi, mvapich2, mpt, sockets):": mpilib,
        "Please enter your %s's location: " % mpilib: mpilib_root,
        "Do you want to try LIBCCHEM? (yes/no): ": 'no',
        "Enter full path to OpenBLAS libraries (without 'lib' subdirectory):": mathlib_root,
    }
    stdqa = {
        r"GAMESS directory\? \[.*\] ": self.builddir,
        r"GAMESS build directory\? \[.*\] ": self.installdir,  # building in install directory
        r"Enter only the main version number, such as .*\nVersion\? ": fortran_ver,
        r"gfortran version.\nPlease enter only the first decimal place, such as .*:": fortran_ver,
        "Enter your choice of 'mkl' or .* 'none': ": mathlib,
    }
    run_cmd_qa(cmd, qa=qa, std_qa=stdqa, log_all=True, simple=True)

    self.log.debug("Contents of install.info:\n%s" % read_file(os.path.join(self.builddir, 'install.info')))

    # patch hardcoded settings in rungms to use values specified in easyconfig file
    rungms = os.path.join(self.builddir, 'rungms')
    extra_gmspath_lines = "set ERICFMT=$GMSPATH/auxdata/ericfmt.dat\nset MCPPATH=$GMSPATH/auxdata/MCP\n"
    try:
        for line in fileinput.input(rungms, inplace=1, backup='.orig'):
            line = re.sub(r"^(\s*set\s*TARGET)=.*", r"\1=%s" % self.cfg['ddi_comm'], line)
            line = re.sub(r"^(\s*set\s*GMSPATH)=.*", r"\1=%s\n%s" % (self.installdir, extra_gmspath_lines), line)
            line = re.sub(r"(null\) set VERNO)=.*", r"\1=%s" % self.version, line)
            line = re.sub(r"^(\s*set DDI_MPI_CHOICE)=.*", r"\1=%s" % mpilib, line)
            line = re.sub(r"^(\s*set DDI_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
            line = re.sub(r"^(\s*set GA_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
            # comment out all adjustments to $LD_LIBRARY_PATH that involves hardcoded paths
            line = re.sub(r"^(\s*)(setenv\s*LD_LIBRARY_PATH\s*/.*)", r"\1#\2", line)
            if self.cfg['scratch_dir']:
                line = re.sub(r"^(\s*set\s*SCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
                line = re.sub(r"^(\s*set\s*USERSCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
            sys.stdout.write(line)
    # fix: was 'except IOError, err:', which is Python-2-only syntax and inconsistent
    # with the 'except ... as err' form used elsewhere in this file
    except IOError as err:
        raise EasyBuildError("Failed to patch %s: %s", rungms, err)
def install_step(self):
    """Install by running install command.

    Determines the default CUDA version and compute capability, runs the
    NVHPC installer in silent mode, patches the generated localrc to use
    the GCC from $PATH, and adjusts the installation (libnuma symlinks,
    siterc for $LIBRARY_PATH, broken nvvp permissions).
    """
    default_cuda_version = self.cfg['default_cuda_version']
    if default_cuda_version is None:
        module_cuda_version_full = get_software_version('CUDA')
        if module_cuda_version_full is not None:
            default_cuda_version = '.'.join(module_cuda_version_full.split('.')[:2])
        else:
            error_msg = "A default CUDA version is needed for installation of NVHPC. "
            error_msg += "It can not be determined automatically and needs to be added manually. "
            error_msg += "You can edit the easyconfig file, "
            error_msg += "or use 'eb --try-amend=default_cuda_version=<version>'."
            raise EasyBuildError(error_msg)

    # Parse default_compute_capability from different sources (CLI has priority)
    ec_default_compute_capability = self.cfg['cuda_compute_capabilities']
    cfg_default_compute_capability = build_option('cuda_compute_capabilities')
    if cfg_default_compute_capability is not None:
        default_compute_capability = cfg_default_compute_capability
    # fix: 'x and x is not None' was redundant (truthiness already excludes None)
    elif ec_default_compute_capability:
        default_compute_capability = ec_default_compute_capability
    else:
        # fix: message parts were concatenated without separating spaces
        # ("NVHPC.Please", "'7.0'\",or"), producing a garbled error message
        error_msg = "A default Compute Capability is needed for installation of NVHPC. "
        error_msg += "Please provide it either in the easyconfig file like 'cuda_compute_capabilities=\"7.0\"', "
        error_msg += "or use 'eb --cuda-compute-capabilities=7.0' from the command line."
        raise EasyBuildError(error_msg)

    # Extract first element of default_compute_capability list, if it is a list
    if isinstance(default_compute_capability, list):
        _before_default_compute_capability = default_compute_capability
        default_compute_capability = _before_default_compute_capability[0]
        warning_msg = "Replaced list of compute capabilities {} ".format(_before_default_compute_capability)
        warning_msg += "with first element of list {}".format(default_compute_capability)
        print_warning(warning_msg)

    # Remove dot-divider for CC; error out if it is not a string
    if isinstance(default_compute_capability, str):
        default_compute_capability = default_compute_capability.replace('.', '')
    else:
        raise EasyBuildError("Unexpected non-string value encountered for compute capability: %s",
                             default_compute_capability)

    nvhpc_env_vars = {
        'NVHPC_INSTALL_DIR': self.installdir,
        'NVHPC_SILENT': 'true',
        'NVHPC_DEFAULT_CUDA': str(default_cuda_version),  # 10.2, 11.0
        'NVHPC_STDPAR_CUDACC': str(default_compute_capability),  # 70, 80; single value, no list!
    }
    cmd = "%s ./install" % ' '.join(['%s=%s' % x for x in sorted(nvhpc_env_vars.items())])
    run_cmd(cmd, log_all=True, simple=True)

    # make sure localrc uses GCC in PATH, not always the system GCC, and does not use a system g77 but gfortran
    install_abs_subdir = os.path.join(self.installdir, self.nvhpc_install_subdir)
    compilers_subdir = os.path.join(install_abs_subdir, "compilers")
    makelocalrc_filename = os.path.join(compilers_subdir, "bin", "makelocalrc")
    # fix: inplace='1' (string) normalized to inplace=1, consistent with the
    # other fileinput.input calls in this file (behavior is identical)
    for line in fileinput.input(makelocalrc_filename, inplace=1, backup='.orig'):
        line = re.sub(r"^PATH=/", r"#PATH=/", line)
        sys.stdout.write(line)

    cmd = "%s -x %s -g77 /" % (makelocalrc_filename, compilers_subdir)
    run_cmd(cmd, log_all=True, simple=True)

    # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so
    # If we use the EB libnuma, delete those symbolic links to ensure they are not used
    if get_software_root("numactl"):
        for filename in ["libnuma.so", "libnuma.so.1"]:
            path = os.path.join(compilers_subdir, "lib", filename)
            if os.path.islink(path):
                os.remove(path)

    # install (or update) siterc file to make NVHPC consider $LIBRARY_PATH
    siterc_path = os.path.join(compilers_subdir, 'bin', 'siterc')
    write_file(siterc_path, SITERC_LIBRARY_PATH, append=True)
    self.log.info("Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s",
                  siterc_path, SITERC_LIBRARY_PATH)

    # The cuda nvvp tar file has broken permissions
    adjust_permissions(self.installdir, stat.S_IWUSR, add=True, onlydirs=True)
def configure_step(self):
    """Configure OpenFOAM build by setting appropriate environment variables.

    Sets compiler flags, patches the hardcoded $WM_* settings out of the OpenFOAM
    etc/bashrc|cshrc scripts, injects the toolchain compiler commands into the
    wmake rules files, and exports the WM_* environment variables for the build.
    """
    # compiler & compiler flags
    comp_fam = self.toolchain.comp_family()

    extra_flags = ''
    if comp_fam == toolchain.GCC:  # @UndefinedVariable
        self.wm_compiler = 'Gcc'
        # wrap both sides in LooseVersion: a plain-string left operand only works via
        # reflected comparison operators, which is fragile (fix)
        if LooseVersion(get_software_version('GCC')) >= LooseVersion('4.8'):
            # make sure non-gold version of ld is used, since OpenFOAM requires it
            # see http://www.openfoam.org/mantisbt/view.php?id=685
            extra_flags = '-fuse-ld=bfd'

        # older versions of OpenFOAM-Extend require -fpermissive
        if 'extend' in self.name.lower() and LooseVersion(self.version) < LooseVersion('2.0'):
            extra_flags += ' -fpermissive'

    elif comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
        self.wm_compiler = 'Icc'
        # make sure -no-prec-div is used with Intel compilers
        extra_flags = '-no-prec-div'
    else:
        raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")

    for env_var in ['CFLAGS', 'CXXFLAGS']:
        env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))

    # patch out hardcoding of WM_* environment variables
    # for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
    for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
        self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
        # disable any third party stuff, we use EB controlled builds
        regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]

        WM_env_var = ['WM_COMPILER', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
        # OpenFOAM >= 3.0.0 can use 64 bit integers
        if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
            WM_env_var.append('WM_LABEL_SIZE')
        for env_var in WM_env_var:
            regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
                               r": ${\g<var>:=\g<val>}; export \g<var>"))
        apply_regex_substitutions(script, regex_subs)

    # inject compiler variables into wmake/rules files
    ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
    langs = ['c', 'c++']
    suffixes = ['', 'Opt']
    wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]

    mpicc = os.environ['MPICC']
    mpicxx = os.environ['MPICXX']
    cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
    cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])

    if self.toolchain.mpi_family() == toolchain.OPENMPI:
        # no -cc/-cxx flags supported in OpenMPI compiler wrappers
        c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
        cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
    else:
        # -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
        c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
        cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)

    comp_vars = {
        # specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
        'cc': c_comp_cmd,
        'CC': cxx_comp_cmd,
        'cOPT': os.environ['CFLAGS'],
        'c++OPT': os.environ['CXXFLAGS'],
    }
    for wmake_rules_file in wmake_rules_files:
        fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
        self.log.debug("Patching compiler variables in %s", fullpath)
        regex_subs = []
        for comp_var, newval in comp_vars.items():
            regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
        apply_regex_substitutions(fullpath, regex_subs)

    # enable verbose build for debug purposes
    # starting with openfoam-extend 3.2, PS1 also needs to be set
    env.setvar("FOAM_VERBOSE", '1')

    # installation directory
    env.setvar("FOAM_INST_DIR", self.installdir)

    # third party directory
    self.thrdpartydir = "ThirdParty-%s" % self.version
    # only if third party stuff is actually installed
    if os.path.exists(self.thrdpartydir):
        os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
        env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))

    env.setvar("WM_COMPILER", self.wm_compiler)

    # set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
    # Note: this name must contain 'MPI' so the MPI version of the Pstream library is built (cf src/Pstream/Allwmake)
    self.wm_mplib = "EASYBUILDMPI"
    env.setvar("WM_MPLIB", self.wm_mplib)

    # parallel build spec
    env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))

    # OpenFOAM >= 3.0.0 can use 64 bit integers
    if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
        if self.toolchain.options['i8']:
            env.setvar("WM_LABEL_SIZE", '64')
        else:
            env.setvar("WM_LABEL_SIZE", '32')

    # make sure lib/include dirs for dependencies are found
    openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
    if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
        self.log.debug("List of deps: %s" % self.cfg.dependencies())
        for dep in self.cfg.dependencies():
            # fix: no trailing comma -- dep_name must be a plain string, not a 1-tuple
            # (the old tuple form only worked by accident via 1-tuple '%' formatting)
            dep_name = dep['name'].upper()
            dep_root = get_software_root(dep['name'])
            env.setvar("%s_SYSTEM" % dep_name, "1")
            dep_vars = {
                "%s_DIR": "%s",
                "%s_BIN_DIR": "%s/bin",
                "%s_LIB_DIR": "%s/lib",
                "%s_INCLUDE_DIR": "%s/include",
            }
            # fix: dict.items() -- dict.iteritems() is Python 2 only and raises AttributeError on Python 3
            for var, val in dep_vars.items():
                env.setvar(var % dep_name, val % dep_root)
    else:
        for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
            dependloc = get_software_root(depend)
            if dependloc:
                if depend == 'CGAL' and get_software_root('Boost'):
                    env.setvar("CGAL_ROOT", dependloc)
                    env.setvar("BOOST_ROOT", get_software_root('Boost'))
                else:
                    env.setvar("%s_ROOT" % depend.upper(), dependloc)
def configure_step(self):
    """Custom configure step for jaxlib.

    Collects options for the jaxlib build script and bazel, sets CUDA/cuDNN/NCCL
    related options when CUDA is available, prepends everything to 'buildopts'
    (so user-supplied buildopts can override), and exports the required
    configuration environment variables.
    """
    super(EB_jaxlib, self).configure_step()

    binutils_root = get_software_root('binutils')
    if not binutils_root:
        raise EasyBuildError("Failed to determine installation prefix for binutils")

    config_env_vars = {
        # This is the binutils bin folder: https://github.com/tensorflow/tensorflow/issues/39263
        'GCC_HOST_COMPILER_PREFIX': os.path.join(binutils_root, 'bin'),
    }

    # Collect options for the build script
    # Used only by the build script
    # C++ flags are set through copt below
    options = ['--target_cpu_features=default']
    # Passed directly to bazel
    bazel_startup_options = [
        '--output_user_root=%s' % tempfile.mkdtemp(suffix='-bazel', dir=self.builddir),
    ]
    # Passed to the build command of bazel
    bazel_options = [
        '--jobs=%s' % self.cfg['parallel'],
        '--subcommands',
        '--action_env=PYTHONPATH',
        '--action_env=EBPYTHONPREFIXES',
    ]
    if self.toolchain.options.get('debug', None):
        bazel_options.extend(['--strip=never', '--copt="-Og"'])
    # Add optimization flags set by EasyBuild each as a separate option.
    # fix: use split() without argument so leading/trailing/consecutive whitespace in
    # $CXXFLAGS does not produce empty '--copt=' options (split(' ') yields '' tokens)
    bazel_options.extend(['--copt=%s' % i for i in os.environ['CXXFLAGS'].split()])

    cuda_root = get_software_root('CUDA')
    if cuda_root:
        cudnn_root = get_software_root('cuDNN')
        if not cudnn_root:
            raise EasyBuildError('For CUDA-enabled builds cuDNN is also required')
        cuda_version = '.'.join(get_software_version('CUDA').split('.')[:2])  # maj.minor
        cudnn_version = '.'.join(get_software_version('cuDNN').split('.')[:3])  # maj.minor.patch
        options.extend([
            '--enable_cuda',
            '--cuda_path=' + cuda_root,
            '--cuda_compute_capabilities=' + self.cfg.get_cuda_cc_template_value('cuda_compute_capabilities'),
            '--cuda_version=' + cuda_version,
            '--cudnn_path=' + cudnn_root,
            '--cudnn_version=' + cudnn_version,
        ])
        if LooseVersion(self.version) >= LooseVersion('0.1.70'):
            # NCCL support is optional; only enable it when the dependency is present
            nccl_root = get_software_root('NCCL')
            if nccl_root:
                options.append('--enable_nccl')
            else:
                options.append('--noenable_nccl')
        config_env_vars['GCC_HOST_COMPILER_PATH'] = which(os.getenv('CC'))
    else:
        options.append('--noenable_cuda')

    if self.cfg['use_mkl_dnn']:
        options.append('--enable_mkl_dnn')
    else:
        options.append('--noenable_mkl_dnn')

    # Prepend to buildopts so users can overwrite this
    self.cfg['buildopts'] = ' '.join(
        options
        + ['--bazel_startup_options="%s"' % i for i in bazel_startup_options]
        + ['--bazel_options="%s"' % i for i in bazel_options]
        + [self.cfg['buildopts']])

    for key, val in sorted(config_env_vars.items()):
        env.setvar(key, val)

    # Print output of build at the end
    apply_regex_substitutions('build/build.py', [(r' shell\(command\)', ' print(shell(command))')])
def configure_step(self):
    """Custom configuration procedure for TensorFlow.

    Filters conflicting paths out of $CPATH/$LIBRARY_PATH, installs compiler
    wrappers when needed (Intel compilers / Intel MPI), exports all TF_* and
    related configuration environment variables, patches configure.py to keep
    Bazel out of $HOME/.cache/bazel, and finally runs ./configure.
    """
    tmpdir = tempfile.mkdtemp(suffix='-bazel-configure')

    # filter out paths from CPATH and LIBRARY_PATH. This is needed since bazel will pull some dependencies that
    # might conflict with dependencies on the system and/or installed with EB. For example: protobuf
    path_filter = self.cfg['path_filter']
    if path_filter:
        self.log.info("Filtering $CPATH and $LIBRARY_PATH with path filter %s", path_filter)
        for var in ['CPATH', 'LIBRARY_PATH']:
            path = os.getenv(var).split(os.pathsep)
            self.log.info("$%s old value was %s" % (var, path))
            # keep only path entries that contain none of the filter substrings
            filtered_path = os.pathsep.join([p for fil in path_filter for p in path if fil not in p])
            env.setvar(var, filtered_path)

    wrapper_dir = os.path.join(tmpdir, 'bin')
    use_wrapper = False
    if self.toolchain.comp_family() == toolchain.INTELCOMP:
        # put wrappers for Intel C/C++ compilers in place (required to make sure license server is found)
        # cfr. https://github.com/bazelbuild/bazel/issues/663
        for compiler in ('icc', 'icpc'):
            self.write_wrapper(wrapper_dir, compiler, 'NOT-USED-WITH-ICC')
        use_wrapper = True

    use_mpi = self.toolchain.options.get('usempi', False)
    mpi_home = ''
    if use_mpi:
        impi_root = get_software_root('impi')
        if impi_root:
            # put wrappers for Intel MPI compiler wrappers in place
            # (required to make sure license server and I_MPI_ROOT are found)
            for compiler in (os.getenv('MPICC'), os.getenv('MPICXX')):
                self.write_wrapper(wrapper_dir, compiler, os.getenv('I_MPI_ROOT'))
            use_wrapper = True
            # set correct value for MPI_HOME
            mpi_home = os.path.join(impi_root, 'intel64')
        else:
            # non-Intel MPI: MPI_HOME is simply the installation prefix of the MPI module
            self.log.debug("MPI module name: %s", self.toolchain.MPI_MODULE_NAME[0])
            mpi_home = get_software_root(self.toolchain.MPI_MODULE_NAME[0])

        self.log.debug("Derived value for MPI_HOME: %s", mpi_home)

    if use_wrapper:
        # wrappers must come first in $PATH to shadow the real compiler commands
        env.setvar('PATH', os.pathsep.join([wrapper_dir, os.getenv('PATH')]))

    self.prepare_python()
    self.handle_jemalloc()

    # detect optional dependencies that toggle TF_NEED_* switches below
    cuda_root = get_software_root('CUDA')
    cudnn_root = get_software_root('cuDNN')
    opencl_root = get_software_root('OpenCL')
    tensorrt_root = get_software_root('TensorRT')
    nccl_root = get_software_root('NCCL')

    # answers to the interactive ./configure, passed via environment variables;
    # ('0', '1')[bool(...)] selects '1' when the dependency/option is enabled
    config_env_vars = {
        'CC_OPT_FLAGS': os.getenv('CXXFLAGS'),
        'MPI_HOME': mpi_home,
        'PYTHON_BIN_PATH': self.python_cmd,
        'PYTHON_LIB_PATH': os.path.join(self.installdir, self.pylibdir),
        'TF_CUDA_CLANG': '0',
        'TF_ENABLE_XLA': '0',  # XLA JIT support
        'TF_NEED_CUDA': ('0', '1')[bool(cuda_root)],
        'TF_NEED_GCP': '0',  # Google Cloud Platform
        'TF_NEED_GDR': '0',
        'TF_NEED_HDFS': '0',  # Hadoop File System
        'TF_NEED_JEMALLOC': ('0', '1')[self.cfg['with_jemalloc']],
        'TF_NEED_MPI': ('0', '1')[bool(use_mpi)],
        'TF_NEED_OPENCL': ('0', '1')[bool(opencl_root)],
        'TF_NEED_OPENCL_SYCL': '0',
        'TF_NEED_S3': '0',  # Amazon S3 File System
        'TF_NEED_VERBS': '0',
        'TF_NEED_TENSORRT': ('0', '1')[bool(tensorrt_root)],
        'TF_NEED_AWS': '0',  # Amazon AWS Platform
        'TF_NEED_KAFKA': '0',  # Amazon Kafka Platform
    }
    if cuda_root:
        config_env_vars.update({
            'CUDA_TOOLKIT_PATH': cuda_root,
            'GCC_HOST_COMPILER_PATH': which(os.getenv('CC')),
            'TF_CUDA_COMPUTE_CAPABILITIES': ','.join(self.cfg['cuda_compute_capabilities']),
            'TF_CUDA_VERSION': get_software_version('CUDA'),
        })
        if cudnn_root:
            config_env_vars.update({
                'CUDNN_INSTALL_PATH': cudnn_root,
                'TF_CUDNN_VERSION': get_software_version('cuDNN'),
            })
        else:
            raise EasyBuildError("TensorFlow has a strict dependency on cuDNN if CUDA is enabled")
        if nccl_root:
            nccl_version = get_software_version('NCCL')
            config_env_vars.update({
                'NCCL_INSTALL_PATH': nccl_root,
            })
        else:
            nccl_version = '1.3'  # Use simple downloadable version
        config_env_vars.update({
            'TF_NCCL_VERSION': nccl_version,
        })

    for (key, val) in sorted(config_env_vars.items()):
        env.setvar(key, val)

    # patch configure.py (called by configure script) to avoid that Bazel abuses $HOME/.cache/bazel
    regex_subs = [(r"(run_shell\(\['bazel')",
                   r"\1, '--output_base=%s', '--install_base=%s'" % (tmpdir, os.path.join(tmpdir, 'inst_base')))]
    apply_regex_substitutions('configure.py', regex_subs)

    cmd = self.cfg['preconfigopts'] + './configure ' + self.cfg['configopts']
    run_cmd(cmd, log_all=True, simple=True)
def configure_step(self):
    """Configure WIEN2k build by patching siteconfig_lapw script and running it.

    The script is first patched in place (compiler/library settings, exit code),
    then driven interactively via run_cmd_qa with version-dependent Q&A tables,
    and finally the generated parallel_options and param.inc files are adjusted.
    """
    self.cfgscript = "siteconfig_lapw"
    # patch config file first

    # toolchain-dependent values
    comp_answer = None
    if self.toolchain.comp_family() == toolchain.INTELCOMP:  # @UndefinedVariable
        if get_software_root('icc'):
            intelver = get_software_version('icc')
        elif get_software_root('intel-compilers'):
            intelver = get_software_version('intel-compilers')
        # NOTE(review): if neither 'icc' nor 'intel-compilers' is loaded, 'intelver' is
        # never assigned and the next line raises NameError -- confirm this is intended
        if LooseVersion(intelver) >= LooseVersion("2011"):
            if LooseVersion(self.version) < LooseVersion("17"):
                comp_answer = 'I'  # Linux (Intel ifort 12.0 compiler + mkl )
            else:
                comp_answer = 'LI'  # Linux (Intel ifort compiler (12.0 or later)+mkl+intelmpi))
        else:
            comp_answer = "K1"  # Linux (Intel ifort 11.1 compiler + mkl )
    elif self.toolchain.comp_family() == toolchain.GCC:  # @UndefinedVariable
        if LooseVersion(self.version) < LooseVersion("17"):
            comp_answer = 'V'  # Linux (gfortran compiler + gotolib)
        else:
            comp_answer = 'LG'  # Linux (gfortran compiler + OpenBlas)
    else:
        raise EasyBuildError("Failed to determine toolchain-dependent answers.")

    # libraries
    # prefer dynamic linking: siteconfig expects dynamic-style link flags here
    liblapack = os.getenv('LIBLAPACK_MT').replace('static', 'dynamic')
    libscalapack = os.getenv('LIBSCALAPACK_MT').replace('static', 'dynamic')
    rlibs = "%s %s" % (liblapack, self.toolchain.get_flag('openmp'))
    rplibs = [libscalapack, liblapack]
    fftwver = get_software_version('FFTW')
    if fftwver:
        suff = ''
        if LooseVersion(fftwver) >= LooseVersion("3"):
            suff = '3'
        rplibs.insert(0, "-lfftw%(suff)s_mpi -lfftw%(suff)s" % {'suff': suff})
    else:
        rplibs.append(os.getenv('LIBFFT'))
    rplibs = ' '.join(rplibs)

    # values to substitute into the siteconfig script, keyed by config parameter name
    vars = {
        'FC': '%s' % os.getenv('F90'),
        'FOPT': '%s' % os.getenv('FFLAGS'),
        'MPF': '%s' % os.getenv('MPIF90'),
        'FPOPT': '%s' % os.getenv('FFLAGS'),
        'CC': os.getenv('CC'),
        'LDFLAGS': '$(FOPT) %s ' % os.getenv('LDFLAGS'),
        'R_LIBS': rlibs,  # libraries for 'real' (not 'complex') binary
        'RP_LIBS': rplibs,  # libraries for 'real' parallel binary
        'MPIRUN': '',
    }

    # rewrite the script in place; fileinput redirects stdout into the file
    for line in fileinput.input(self.cfgscript, inplace=1, backup='.orig'):
        # set config parameters
        for (key, val) in vars.items():
            regexp = re.compile('^([a-z0-9]+):%s:(.*)' % key)
            res = regexp.search(line)
            if res:
                # we need to exclude the lines with 'current', otherwise we break the script
                if not res.group(1) == "current":
                    if 'OPT' in key:
                        # append instead of replace
                        line = regexp.sub('\\1:%s:%s %s' % (key, res.group(2), val), line)
                    else:
                        line = regexp.sub('\\1:%s:%s' % (key, val), line)
        # avoid exit code > 0 at end of configuration
        line = re.sub(r'(\s+)exit 1', '\\1exit 0', line)
        sys.stdout.write(line)

    # set correct compilers
    env.setvar('bin', os.getcwd())

    dc = {
        'COMPILERC': os.getenv('CC'),
        'COMPILER': os.getenv('F90'),
        'COMPILERP': os.getenv('MPIF90'),
    }

    if LooseVersion(self.version) < LooseVersion("17"):
        # old versions read one file per compiler setting
        for (key, val) in dc.items():
            write_file(key, val)
    else:
        # newer versions expect a single WIEN2k_COMPILER file with renamed keys
        dc['cc'] = dc.pop('COMPILERC')
        dc['fortran'] = dc.pop('COMPILER')
        dc['parallel'] = dc.pop('COMPILERP')
        write_file('WIEN2k_COMPILER', '\n'.join(['%s:%s' % (k, v) for k, v in dc.items()]))

    # configure with patched configure script
    self.log.debug('%s part I (configure)' % self.cfgscript)

    if LooseVersion(self.version) >= LooseVersion('21'):
        perlroot = get_software_root('Perl')
        if perlroot is None:
            raise EasyBuildError("Perl is a required dependency of WIEN2k as of version 21")
        self.perlbin = os.path.join(perlroot, 'bin', 'perl')
    else:
        self.perlbin = ''

    cmd = "./%s" % self.cfgscript

    # base Q&A table; extended below depending on WIEN2k version and dependencies
    qanda = {
        'Press RETURN to continue': '',
        'Your compiler:': '',
        'Hit Enter to continue': '',
        'Remote shell (default is ssh) =': '',
        'Remote copy (default is scp) =': '',
        'and you need to know details about your installed mpi ..) (y/n)': 'y',
        'Q to quit Selection:': 'Q',
        'A Compile all programs (suggested) Q Quit Selection:': 'Q',
        'Please enter the full path of the perl program: ': self.perlbin,
        'continue or stop (c/s)': 'c',
        '(like taskset -c). Enter N / your_specific_command:': 'N',
    }
    if LooseVersion(self.version) >= LooseVersion("13"):
        fftw_root = get_software_root('FFTW')
        if fftw_root:
            fftw_maj = get_software_version('FFTW').split('.')[0]
            fftw_spec = 'FFTW%s' % fftw_maj
        else:
            raise EasyBuildError("Required FFTW dependency is missing")
        qanda.update({
            ') Selection:': comp_answer,
            'Shared Memory Architecture? (y/N):': 'N',
            'Set MPI_REMOTE to 0 / 1:': '0',
            'You need to KNOW details about your installed MPI and FFTW ) (y/n)': 'y',
            'Do you want to use FFTW (recommended, but for sequential code not required)? (Y,n):': 'y',
            'Please specify whether you want to use FFTW3 (default) or FFTW2 (FFTW3 / FFTW2):': fftw_spec,
            'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3):': fftw_root,
            'is this correct? enter Y (default) or n:': 'Y',
        })

        # libxc prompts changed wording across WIEN2k versions, hence the string pieces
        libxcroot = get_software_root('libxc')
        if LooseVersion(self.version) < LooseVersion("17"):
            libxcstr1 = ' before'
            libxcstr3 = ''
        elif LooseVersion(self.version) > LooseVersion("19"):
            libxcstr1 = ' - usually not needed'
            libxcstr3 = 'root-'
        else:
            libxcstr1 = ''
            libxcstr3 = ''

        libxcquestion1 = 'LIBXC (that you have installed%s)? (y,N):' % libxcstr1
        libxcquestion2 = 'Do you want to automatically search for LIBXC installations? (Y,n):'
        libxcquestion3 = 'Please enter the %sdirectory of your LIBXC-installation!:' % libxcstr3
        libxcquestion4 = 'Please enter the lib-directory of your LIBXC-installation (usually lib or lib64)!:'
        libxcquestion5 = 'LIBXC (usually not needed, ONLY for experts who want to play with different DFT options. '
        libxcquestion5 += 'It must have been installed before)? (y,N):'

        if libxcroot:
            qanda.update({
                libxcquestion1: 'y',
                libxcquestion2: 'n',
                libxcquestion3: libxcroot,
                libxcquestion4: 'lib',
                libxcquestion5: 'y',
            })
        else:
            qanda.update({
                libxcquestion1: 'N',
                libxcquestion5: 'N',
            })

        if LooseVersion(self.version) >= LooseVersion("17"):
            # derive ScaLAPACK/BLACS library names from the toolchain link flags
            scalapack_libs = os.getenv('LIBSCALAPACK').split()
            scalapack = next((lib[2:] for lib in scalapack_libs if 'scalapack' in lib), 'scalapack')
            blacs = next((lib[2:] for lib in scalapack_libs if 'blacs' in lib), 'openblas')
            qanda.update({
                'You need to KNOW details about your installed MPI, ELPA, and FFTW ) (y/N)': 'y',
                'Do you want to use a present ScaLAPACK installation? (Y,n):': 'y',
                'Do you want to use the MKL version of ScaLAPACK? (Y,n):': 'n',  # we set it ourselves below
                'Do you use Intel MPI? (Y,n):': 'y',
                'Is this correct? (Y,n):': 'y',
                'Please specify the target architecture of your ScaLAPACK libraries (e.g. intel64)!:': '',
                'ScaLAPACK root:': os.getenv('MKLROOT') or os.getenv('EBROOTSCALAPACK'),
                'ScaLAPACK library:': scalapack,
                'BLACS root:': os.getenv('MKLROOT') or os.getenv('EBROOTOPENBLAS'),
                'BLACS library:': blacs,
                'Please enter your choice of additional libraries!:': '',
                'Do you want to use a present FFTW installation? (Y,n):': 'y',
                'Please specify the path of your FFTW installation (like /opt/fftw3/) '
                'or accept present choice (enter):': fftw_root,
                'Please specify the target achitecture of your FFTW library (e.g. lib64) '
                'or accept present choice (enter):': '',
                'Do you want to automatically search for FFTW installations? (Y,n):': 'n',
                'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3/) '
                'or accept present choice (enter):': fftw_root,
                'Is this correct? enter Y (default) or n:': 'Y',
                'Please specify the name of your FFTW library or accept present choice (enter):': '',
                'or accept the recommendations (Enter - default)!:': '',
                # the temporary directory is hardcoded into execution scripts and must exist at runtime
                'Please enter the full path to your temporary directory:': '/tmp',
            })

            # NOTE(review): std_qa is only initialized on this (>= 17) path, but it is
            # updated unconditionally further below -- for versions < 17 that looks like
            # a NameError waiting to happen; confirm against supported version range
            std_qa = {}

            elparoot = get_software_root('ELPA')
            if elparoot:
                # number the ELPA help listing so the matching std_qa regex can pick an entry
                apply_regex_substitutions(self.cfgscript, [(r'cat elpahelp2$', 'cat -n elpahelp2')])
                elpa_dict = {
                    'root': elparoot,
                    'version': get_software_version('ELPA'),
                    'variant': 'elpa_openmp' if self.toolchain.get_flag('openmp') else 'elpa'
                }
                elpa_dir = "%(root)s/include/%(variant)s-%(version)s" % elpa_dict
                std_qa.update({
                    r".*(?P<number>[0-9]+)\t%s\n(.*\n)*" % elpa_dir: "%(number)s",
                })
                qanda.update({
                    'Do you want to use ELPA? (y,N):': 'y',
                    'Do you want to automatically search for ELPA installations? (Y,n):': 'n',
                    'Please specify the ROOT-path of your ELPA installation (like /usr/local/elpa/) '
                    'or accept present path (Enter):': elparoot,
                    'Please specify the lib-directory of your ELPA installation (e.g. lib or lib64)!:': 'lib',
                    'Please specify the lib-directory of your ELPA installation (e.g. lib or lib64):': 'lib',
                    'Please specify the name of your installed ELPA library (e.g. elpa or elpa_openmp)!:':
                        elpa_dict['variant'],
                    'Please specify the name of your installed ELPA library (e.g. elpa or elpa_openmp):':
                        elpa_dict['variant'],
                })
            else:
                qanda.update({'Do you want to use ELPA? (y,N):': 'n'})
    else:
        qanda.update({
            'compiler) Selection:': comp_answer,
            'Shared Memory Architecture? (y/n):': 'n',
            'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0',
            'Do you have MPI and Scalapack installed and intend to run '
            'finegrained parallel? (This is usefull only for BIG cases '
            '(50 atoms and more / unit cell) and you need to know details '
            'about your installed mpi and fftw ) (y/n)': 'y',
        })

    # output lines that are questions-looking but need no answer
    no_qa = [
        'You have the following mkl libraries in %s :' % os.getenv('MKLROOT'),
        "%s[ \t]*.*" % os.getenv('MPIF90'),
        "%s[ \t]*.*" % os.getenv('F90'),
        "%s[ \t]*.*" % os.getenv('CC'),
        ".*SRC_.*",
    ]
    # NOTE(review): in the last entry '(suggested)' is an (unescaped) regex group, so the
    # literal parentheses in the actual prompt would not match -- verify this pattern fires
    std_qa.update({
        r'S\s+Save and Quit[\s\n]+To change an item select option.[\s\n]+Selection:': 'S',
        'Recommended setting for parallel f90 compiler: .* Current selection: Your compiler:': os.getenv('MPIF90'),
        r'process or you can change single items in "Compiling Options".[\s\n]+Selection:': 'S',
        r'A\s+Compile all programs (suggested)[\s\n]+Q\s*Quit[\s\n]+Selection:': 'Q',
    })

    run_cmd_qa(cmd, qanda, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    # post-configure patches
    parallel_options = {}
    parallel_options_fp = os.path.join(self.cfg['start_dir'], 'parallel_options')

    if self.cfg['wien_mpirun']:
        parallel_options.update({'WIEN_MPIRUN': self.cfg['wien_mpirun']})

    if self.cfg['taskset'] is None:
        self.cfg['taskset'] = 'no'
    parallel_options.update({'TASKSET': self.cfg['taskset']})

    for opt in ['use_remote', 'mpi_remote', 'wien_granularity']:
        parallel_options.update({opt.upper(): int(self.cfg[opt])})

    # parallel_options is a csh-style script sourced at runtime, hence 'setenv'
    write_file(parallel_options_fp, '\n'.join(['setenv %s "%s"' % tup for tup in parallel_options.items()]))

    if self.cfg['remote']:
        if self.cfg['remote'] == 'pbsssh':
            extratxt = '\n'.join([
                '',
                "set remote = pbsssh",
                "setenv PBSSSHENV 'LD_LIBRARY_PATH PATH'",
                '',
            ])
            write_file(parallel_options_fp, extratxt, append=True)
        else:
            raise EasyBuildError("Don't know how to handle remote %s", self.cfg['remote'])

    self.log.debug("Patched file %s: %s", parallel_options_fp, read_file(parallel_options_fp))

    # Set configurable parameters for size of problems.
    param_subs = [
        (r'\s+PARAMETER\s+\(\s*NMATMAX\s*=\s*\d+\)', r' PARAMETER (NMATMAX=%s)' % self.cfg['nmatmax']),
        (r'\s+PARAMETER\s+\(\s*NUME\s*=\s*\d+\)', r' PARAMETER (NUME=%s)' % self.cfg['nume']),
    ]
    self.log.debug("param_subs = %s" % param_subs)
    apply_regex_substitutions('SRC_lapw1/param.inc', param_subs)
    self.log.debug("Patched file %s: %s", 'SRC_lapw1/param.inc', read_file('SRC_lapw1/param.inc'))
def prepare_step(self):
    """Custom prepare step for Tau: check required dependencies and collect information on them.

    Records install prefixes for the selected extra backends, determines
    compiler/MPI settings and optional-package options, builds the list of Tau
    Makefile variant labels, and pre-creates the install directory.
    """
    super(EB_TAU, self).prepare_step()

    # install prefixes for selected backends; each selected backend must be a loaded
    # dependency, and each loaded backend dependency must be selected (strict both ways)
    for dep in ['Scalasca', 'Score-P', 'Vampirtrace']:
        root = get_software_root(dep)
        # normalized backend name, e.g. 'Score-P' -> 'scorep'
        backend_name = dep.lower().replace('-', '')
        if backend_name in self.cfg['extra_backends']:
            if root:
                setattr(self, backend_name, root)
            else:
                raise EasyBuildError("%s is listed in extra_backends, but not available as a dependency" % dep)
        elif root:
            raise EasyBuildError("%s included as dependency, but '%s' not in extra_backends" % (dep, backend_name))

    # make sure Scalasca v1.x is used as a dependency (if it's there)
    # (self.scalasca is only set above when the scalasca backend is enabled)
    if hasattr(self, 'scalasca') and get_software_version('Scalasca').split('.')[0] != '1':
        raise EasyBuildError("Scalasca v1.x must be used when scalasca backend is enabled")

    # determine values for compiler flags to use
    # values are [C compiler, C++ compiler, Fortran compiler label]
    known_compilers = {
        toolchain.CLANGGCC: ['clang', 'clang++', 'gfortran'],
        toolchain.GCC: ['gcc', 'g++', 'gfortran'],
        toolchain.INTELCOMP: ['icc', 'icpc', 'intel'],
    }
    comp_fam = self.toolchain.comp_family()
    if comp_fam in known_compilers:
        self.cc, self.cxx, self.fortran = known_compilers[comp_fam]
    else:
        raise EasyBuildError("Compiler family not supported yet: %s" % comp_fam)

    # determine values for MPI flags
    self.mpi_inc_dir, self.mpi_lib_dir = os.getenv('MPI_INC_DIR'), os.getenv('MPI_LIB_DIR')
    if self.mpi_inc_dir is None or self.mpi_lib_dir is None:
        raise EasyBuildError("Failed to determine MPI include/library paths, no MPI available in toolchain?")

    # determine value for optional packages option template
    self.opt_pkgs_opts = ''
    for dep, opt in [('PAPI', 'papi'), ('PDT', 'pdt'), ('binutils', 'bfd')]:
        root = get_software_root(dep)
        if root:
            self.opt_pkgs_opts += ' -%s=%s' % (opt, root)

    # determine list of labels, based on selected (extra) backends, variants and optional packages
    # backend_labels is positionally aligned with ['tau'] + KNOWN_BACKENDS via zip below
    self.variant_labels = []
    backend_labels = ['', '-epilog-scalasca-trace', '-scorep', '-vampirtrace-trace']
    for backend, backend_label in zip(['tau'] + KNOWN_BACKENDS, backend_labels):
        if backend == 'tau' or backend in self.cfg['extra_backends']:
            # three variants per backend: MPI-only, OpenMP-only, MPI+OpenMP
            for pref, suff in [('-mpi', ''), ('', '-openmp-opari'), ('-mpi', '-openmp-opari')]:
                variant_label = 'tau'
                # the compiler is only included in the label for non-GCC toolchains
                if self.cxx != 'g++':
                    variant_label += '-' + self.cxx
                if get_software_root('PAPI'):
                    variant_label += '-papi'
                variant_label += pref
                if get_software_root('PDT'):
                    variant_label += '-pdt'
                variant_label += suff + backend_label
                self.variant_labels.append(variant_label)

    # make sure selected default Tau makefile will be available
    avail_makefiles = ['Makefile.' + l for l in self.variant_labels]
    if self.cfg['tau_makefile'] not in avail_makefiles:
        tup = (self.cfg['tau_makefile'], avail_makefiles)
        raise EasyBuildError("Specified tau_makefile %s will not be available (only: %s)" % tup)

    # create install directory and make sure it does not get cleaned up again in the install step;
    # the first configure iteration already puts things in place in the install directory,
    # so that shouldn't get cleaned up afterwards...
    self.log.info("Creating install dir %s before starting configure-build-install iterations", self.installdir)
    self.make_installdir()
    self.cfg['keeppreviousinstall'] = True
def _set_fftw_variables(self):
    """Derive the FFT(W) settings for an imkl-provided FFT.

    Composes the list of MKL FFTW interface libraries (incl. the cluster/cdft
    interface for MPI-enabled recent imkl versions), sets FFT_LIB_DIR and
    FFT_INCLUDE_DIR from the BLAS equivalents, and only sets FFT_LIB when the
    (optional) interface libraries are actually present on disk.
    """
    if not hasattr(self, 'BLAS_LIB_DIR'):
        raise EasyBuildError("_set_fftw_variables: IntelFFT based on IntelMKL (no BLAS_LIB_DIR found)")

    imklver = get_software_version(self.FFT_MODULE_NAME[0])

    # suffixes selecting the right flavour of the interface libraries
    pic_suffix = '_pic' if self.options.get('pic', None) else ''
    int_suffix = '_ilp64' if self.options.get('i8', None) else '_lp64'

    if get_software_root('icc') is not None:
        comp_suffix = '_intel'
    elif get_software_root('PGI'):
        comp_suffix = '_pgi'
    elif get_software_root('GCC'):
        comp_suffix = '_gnu'
    else:
        raise EasyBuildError("Not using Intel compilers, PGI nor GCC, don't know compiler suffix for FFTW libraries.")

    interface_lib = "fftw3xc%s%s" % (comp_suffix, pic_suffix)
    fftw_libs = [interface_lib]
    cluster_interface_lib = None

    if self.options.get('usempi', False):
        # add cluster interface for recent imkl versions
        if LooseVersion(imklver) >= LooseVersion('10.3'):
            cdft_suffix = pic_suffix
            if LooseVersion(imklver) >= LooseVersion('11.0.2'):
                cdft_suffix = int_suffix + cdft_suffix
            cluster_interface_lib = 'fftw3x_cdft%s' % cdft_suffix
            fftw_libs.append(cluster_interface_lib)
            fftw_libs.append("mkl_cdft_core")  # add cluster dft
            fftw_libs.extend(self.variables['LIBBLACS'].flatten())  # add BLACS; use flatten because ListOfList
            self.log.debug('fftw_libs %s' % fftw_libs.__repr__())

    fftw_libs.extend(self.variables['LIBBLAS'].flatten())  # add BLAS libs (contains dft)
    self.log.debug('fftw_libs %s' % fftw_libs.__repr__())

    self.FFT_LIB_DIR = self.BLAS_LIB_DIR
    self.FFT_INCLUDE_DIR = [os.path.join(d, 'fftw') for d in self.BLAS_INCLUDE_DIR]

    # building the FFTW interfaces is optional,
    # so make sure libraries are there before FFT_LIB is set
    imklroot = get_software_root(self.FFT_MODULE_NAME[0])
    fft_lib_dirs = [os.path.join(imklroot, d) for d in self.FFT_LIB_DIR]

    def lib_on_disk(libname):
        """Whether a static lib<name>.a exists in any of the FFT lib dirs."""
        return any(os.path.exists(os.path.join(d, "lib%s.a" % libname)) for d in fft_lib_dirs)

    if not lib_on_disk(interface_lib) and LooseVersion(imklver) >= LooseVersion("10.2"):
        # interface libs can be optional:
        # MKL >= 10.2 include fftw3xc and fftw3xf interfaces in LIBBLAS=libmkl_gf/libmkl_intel
        # See https://software.intel.com/en-us/articles/intel-mkl-main-libraries-contain-fftw3-interfaces
        # The cluster interface libs (libfftw3x_cdft*) can be omitted if the toolchain does not provide MPI-FFTW
        # interfaces.
        dropped = (interface_lib, cluster_interface_lib)
        fftw_libs = [lib for lib in fftw_libs if lib not in dropped]

    # filter out libraries from list of FFTW libraries to check for if they are not provided by Intel MKL
    check_fftw_libs = [lib for lib in fftw_libs if lib not in ('dl', 'gfortran')]

    if all(lib_on_disk(lib) for lib in check_fftw_libs):
        self.FFT_LIB = fftw_libs
    else:
        msg = "Not all FFTW interface libraries %s are found in %s" % (check_fftw_libs, fft_lib_dirs)
        msg += ", can't set $FFT_LIB."
        if self.dry_run:
            dry_run_warning(msg, silent=build_option('silent'))
        else:
            raise EasyBuildError(msg)
def configure_step(self):
    """Custom configure step for NAMD, we build charm++ first (if required).

    Completes the Charm++/NAMD architecture strings for the toolchain compiler,
    builds Charm++, then assembles the NAMD config options (compilers, CUDA,
    Tcl, FFTW) and runs ./config.
    """
    # complete Charm ++ and NAMD architecture string with compiler family
    comp_fam = self.toolchain.comp_family()
    if self.toolchain.options.get('usempi', False):
        charm_arch_comp = 'mpicxx'
    else:
        charm_arch_comps = {
            toolchain.GCC: 'gcc',
            toolchain.INTELCOMP: 'icc',
        }
        charm_arch_comp = charm_arch_comps.get(comp_fam, None)
    namd_comps = {
        toolchain.GCC: 'g++',
        toolchain.INTELCOMP: 'icc',
    }
    namd_comp = namd_comps.get(comp_fam, None)
    if charm_arch_comp is None or namd_comp is None:
        raise EasyBuildError("Unknown compiler family, can't complete Charm++/NAMD target architecture.")

    # NOTE: important to add smp BEFORE the compiler
    # charm arch style is: mpi-linux-x86_64-smp-mpicxx
    # otherwise the setting of name_charm_arch below will get things
    # in the wrong order
    if self.toolchain.options.get('openmp', False):
        self.cfg.update('charm_arch', 'smp')
    self.cfg.update('charm_arch', charm_arch_comp)
    self.log.info("Updated 'charm_arch': %s", self.cfg['charm_arch'])
    self.namd_arch = '%s-%s' % (self.cfg['namd_basearch'], namd_comp)
    self.log.info("Completed NAMD target architecture: %s", self.namd_arch)

    cmd = "./build charm++ %(arch)s %(opts)s --with-numa -j%(parallel)s '%(cxxflags)s'" % {
        'arch': self.cfg['charm_arch'],
        'cxxflags': os.environ['CXXFLAGS'] + ' -DMPICH_IGNORE_CXX_SEEK ' + self.cfg['charm_extra_cxxflags'],
        'opts': self.cfg['charm_opts'],
        'parallel': self.cfg['parallel'],
    }
    # strip the last extension off the tarball name to get the unpacked subdir
    # NOTE(review): this removes only one extension, so 'charm-6.8.2.tar.gz' would
    # yield 'charm-6.8.2.tar' -- presumably tarballs are '.tar'; confirm
    charm_subdir = '.'.join(os.path.basename(self.charm_tarballs[0]).split('.')[:-1])
    self.log.debug("Building Charm++ using cmd '%s' in '%s'" % (cmd, charm_subdir))
    run_cmd(cmd, path=charm_subdir)

    # compiler (options)
    self.cfg.update('namd_cfg_opts', '--cc "%s" --cc-opts "%s"' % (os.environ['CC'], os.environ['CFLAGS']))
    cxxflags = os.environ['CXXFLAGS']
    if LooseVersion(self.version) >= LooseVersion('2.12'):
        # NAMD >= 2.12 requires C++11 support
        cxxflags += ' --std=c++11'
    self.cfg.update('namd_cfg_opts', '--cxx "%s" --cxx-opts "%s"' % (os.environ['CXX'], cxxflags))

    # NAMD dependencies: CUDA, TCL, FFTW
    cuda = get_software_root('CUDA')
    if cuda:
        self.cfg.update('namd_cfg_opts', "--with-cuda --cuda-prefix %s" % cuda)

    tcl = get_software_root('Tcl')
    if tcl:
        self.cfg.update('namd_cfg_opts', '--with-tcl --tcl-prefix %s' % tcl)
        # patch the hardcoded -ltcl<ver> link flag in the arch file to the actual Tcl version (maj.min)
        tclversion = '.'.join(get_software_version('Tcl').split('.')[0:2])
        tclv_subs = [(r'-ltcl[\d.]*\s', '-ltcl%s ' % tclversion)]
        apply_regex_substitutions(os.path.join('arch', '%s.tcl' % self.cfg['namd_basearch']), tclv_subs)

    fftw = get_software_root('FFTW')
    if fftw:
        if LooseVersion(get_software_version('FFTW')) >= LooseVersion('3.0'):
            if LooseVersion(self.version) >= LooseVersion('2.9'):
                self.cfg.update('namd_cfg_opts', "--with-fftw3")
            else:
                raise EasyBuildError("Using FFTW v3.x only supported in NAMD v2.9 and up.")
        else:
            self.cfg.update('namd_cfg_opts', "--with-fftw")
        self.cfg.update('namd_cfg_opts', "--fftw-prefix %s" % fftw)

    # normalize whitespace in charm_arch into single dashes for the --charm-arch option
    namd_charm_arch = "--charm-arch %s" % '-'.join(self.cfg['charm_arch'].strip().split())
    cmd = "./config %s %s %s " % (self.namd_arch, namd_charm_arch, self.cfg["namd_cfg_opts"])
    run_cmd(cmd)
def build_step(self):
    """Custom build step for BerkeleyGW."""
    # force sequential build; parallelism for BerkeleyGW is not forwarded to make -j here
    self.cfg['parallel'] = 1

    self.cfg['buildopts'] = 'all-flavors'
    # start from the generic MPI/Linux arch file and override variables via buildopts
    copy_file(os.path.join('config', 'generic.mpi.linux.mk'), 'arch.mk')

    mpicc = os.environ['MPICC']
    mpicxx = os.environ['MPICXX']
    mpif90 = os.environ['MPIF90']

    paraflags = []
    var_suffix = ''
    if self.toolchain.options.get('openmp', None):
        paraflags.append('-DOMP')
        # with OpenMP enabled, pick the multithreaded math library variables (LIB*_MT)
        var_suffix = '_MT'
    if self.toolchain.options.get('usempi', None):
        paraflags.append('-DMPI')
        self.cfg.update('buildopts', 'C_PARAFLAG="-DPARA"')
    self.cfg.update('buildopts', 'PARAFLAG="%s"' % ' '.join(paraflags))

    if self.toolchain.options.get('debug', None):
        self.cfg.update('buildopts', 'DEBUGFLAG="-DDEBUG -DVERBOSE"')
    else:
        self.cfg.update('buildopts', 'DEBUGFLAG=""')

    self.cfg.update('buildopts', 'LINK="%s"' % mpif90)
    self.cfg.update('buildopts', 'C_LINK="%s"' % mpicxx)
    self.cfg.update('buildopts', 'FOPTS="%s"' % os.environ['FFLAGS'])
    self.cfg.update('buildopts', 'C_OPTS="%s"' % os.environ['CFLAGS'])
    self.cfg.update('buildopts', 'LAPACKLIB="%s"' % os.environ['LIBLAPACK' + var_suffix])
    self.cfg.update('buildopts', 'SCALAPACKLIB="%s"' % os.environ['LIBSCALAPACK' + var_suffix])

    mathflags = []
    if self.cfg['with_scalapack']:
        mathflags.append('-DUSESCALAPACK')
    if self.cfg['unpacked']:
        mathflags.append('-DUNPACKED')

    # compiler-family specific settings
    comp_fam = self.toolchain.comp_family()
    if comp_fam == toolchain.INTELCOMP:
        self.cfg.update('buildopts', 'COMPFLAG="-DINTEL"')
        self.cfg.update('buildopts', 'MOD_OPT="-module "')
        self.cfg.update('buildopts', 'F90free="%s -free"' % mpif90)
        self.cfg.update('buildopts', 'FCPP="cpp -C -P -ffreestanding"')
        self.cfg.update('buildopts', 'C_COMP="%s"' % mpicc)
        self.cfg.update('buildopts', 'CC_COMP="%s"' % mpicxx)
        self.cfg.update('buildopts', 'BLACSDIR="%s"' % os.environ['BLACS_LIB_DIR'])
        self.cfg.update('buildopts', 'BLACS="%s"' % os.environ['LIBBLACS'])
    elif comp_fam == toolchain.GCC:
        c_flags = "-std=c99"
        cxx_flags = "-std=c++0x"
        f90_flags = "-ffree-form -ffree-line-length-none -fno-second-underscore"
        if LooseVersion(get_software_version('GCC')) >= LooseVersion('10'):
            # GCC 10 made common-symbol and Fortran argument-mismatch errors fatal by default
            c_flags += " -fcommon"
            cxx_flags += " -fcommon"
            f90_flags += " -fallow-argument-mismatch"
        self.cfg.update('buildopts', 'COMPFLAG="-DGNU"')
        self.cfg.update('buildopts', 'MOD_OPT="-J "')
        self.cfg.update('buildopts', 'F90free="%s %s"' % (mpif90, f90_flags))
        self.cfg.update('buildopts', 'FCPP="cpp -C -nostdinc -nostdinc++"')
        self.cfg.update('buildopts', 'C_COMP="%s %s"' % (mpicc, c_flags))
        self.cfg.update('buildopts', 'CC_COMP="%s %s"' % (mpicxx, cxx_flags))
    else:
        raise EasyBuildError(
            "EasyBuild does not yet have support for building BerkeleyGW with toolchain %s" % comp_fam)

    mkl = get_software_root('imkl')
    if mkl:
        self.cfg.update('buildopts', 'MKLPATH="%s"' % os.getenv('MKLROOT'))

    fftw = get_software_root('FFTW')
    if mkl or fftw:
        mathflags.append('-DUSEFFTW3')
        self.cfg.update('buildopts', 'FFTWINCLUDE="%s"' % os.environ['FFTW_INC_DIR'])
        libfft_var = 'LIBFFT%s' % var_suffix
        fft_libs = os.environ[libfft_var]
        if fftw and get_software_root('fftlib'):
            # fftlib must come before FFTW on the link line so its symbols take precedence
            fft_libs = "%s %s" % (os.environ['FFTLIB'], fft_libs)
        self.cfg.update('buildopts', 'FFTWLIB="%s"' % fft_libs)

    hdf5 = get_software_root('HDF5')
    if hdf5:
        mathflags.append('-DHDF5')
        self.cfg.update('buildopts', 'HDF5INCLUDE="%s/include"' % hdf5)
        self.cfg.update(
            'buildopts',
            'HDF5LIB="-L%s/lib -lhdf5hl_fortran -lhdf5_hl -lhdf5_fortran -lhdf5 -lsz -lz"' % hdf5)

    elpa = get_software_root('ELPA')
    if elpa:
        if not self.cfg['with_scalapack']:
            raise EasyBuildError(
                "ELPA requires ScaLAPACK but 'with_scalapack' is set to False"
            )
        mathflags.append('-DUSEELPA')
        # OpenMP builds of ELPA install libraries/includes with an '_openmp' suffix
        elpa_suffix = '_openmp' if self.toolchain.options.get(
            'openmp', None) else ''
        self.cfg.update(
            'buildopts', 'ELPALIB="%s/lib/libelpa%s.a"' % (elpa, elpa_suffix))
        self.cfg.update(
            'buildopts', 'ELPAINCLUDE="%s/include/elpa%s-%s/modules"' %
            (elpa, elpa_suffix, get_software_version('ELPA')))

    self.cfg.update('buildopts', 'MATHFLAG="%s"' % ' '.join(mathflags))

    super(EB_BerkeleyGW, self).build_step()
def _set_fftw_variables(self):
    """Set FFT-related class variables based on the FFTW interfaces bundled with Intel MKL.

    Requires the BLAS variables (``BLAS_LIB_DIR``, ``BLAS_INCLUDE_DIR``) to be set already,
    since MKL's FFTW interface libraries live alongside the MKL BLAS libraries.

    :raises EasyBuildError: if BLAS variables are missing, if no supported compiler is found,
                            or if the required FFTW interface libraries are not available.
    """
    if not hasattr(self, 'BLAS_LIB_DIR'):
        raise EasyBuildError(
            "_set_fftw_variables: IntelFFT based on IntelMKL (no BLAS_LIB_DIR found)"
        )

    imklver = get_software_version(self.FFT_MODULE_NAME[0])

    # library name suffixes depend on PIC, integer width and compiler family
    picsuff = ''
    if self.options.get('pic', None):
        picsuff = '_pic'
    bitsuff = '_lp64'
    if self.options.get('i8', None):
        bitsuff = '_ilp64'
    compsuff = '_intel'
    if get_software_root('icc') is None:
        if get_software_root('PGI'):
            compsuff = '_pgi'
        elif get_software_root('GCC'):
            compsuff = '_gnu'
        else:
            raise EasyBuildError(
                "Not using Intel compilers, PGI nor GCC, "
                "don't know compiler suffix for FFTW libraries."
            )

    fftw_libs = ["fftw3xc%s%s" % (compsuff, picsuff)]
    if self.options['usempi']:
        # add cluster interface for recent imkl versions
        if LooseVersion(imklver) >= LooseVersion("11.0.2"):
            fftw_libs.append("fftw3x_cdft%s%s" % (bitsuff, picsuff))
        elif LooseVersion(imklver) >= LooseVersion("10.3"):
            fftw_libs.append("fftw3x_cdft%s" % picsuff)
        fftw_libs.append("mkl_cdft_core")  # add cluster dft
        # add BLACS; use flatten because ListOfList
        fftw_libs.extend(self.variables['LIBBLACS'].flatten())

    # use repr() rather than the dunder call fftw_libs.__repr__()
    self.log.debug('fftw_libs %s' % repr(fftw_libs))
    fftw_libs.extend(self.variables['LIBBLAS'].flatten())  # add BLAS libs (contains dft)
    self.log.debug('fftw_libs %s' % repr(fftw_libs))

    self.FFT_LIB_DIR = self.BLAS_LIB_DIR
    self.FFT_INCLUDE_DIR = self.BLAS_INCLUDE_DIR

    # building the FFTW interfaces is optional,
    # so make sure libraries are there before FFT_LIB is set
    imklroot = get_software_root(self.FFT_MODULE_NAME[0])
    fft_lib_dirs = [os.path.join(imklroot, d) for d in self.FFT_LIB_DIR]

    # filter out libraries from list of FFTW libraries to check for if they are not provided by Intel MKL
    check_fftw_libs = [lib for lib in fftw_libs if lib not in ('dl', 'gfortran')]

    def fftw_lib_exists(libname):
        """Whether a static lib<libname>.a exists in any of the candidate MKL lib dirs."""
        return any(os.path.exists(os.path.join(d, "lib%s.a" % libname)) for d in fft_lib_dirs)

    if all(fftw_lib_exists(lib) for lib in check_fftw_libs):
        self.FFT_LIB = fftw_libs
    else:
        msg = "Not all FFTW interface libraries %s are found in %s" % (check_fftw_libs, fft_lib_dirs)
        msg += ", can't set $FFT_LIB."
        if self.dry_run:
            dry_run_warning(msg, silent=build_option('silent'))
        else:
            raise EasyBuildError(msg)
def configure_intel_based(self):
    """Configure for Intel based toolchains: returns the options dict for the arch file."""

    # based on guidelines available at
    # http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/
    intelurl = ''.join(["http://software.intel.com/en-us/articles/",
                        "build-cp2k-using-intel-fortran-compiler-professional-edition/"])

    options = self.configure_common()

    extrainc = ''
    if self.modincpath:
        extrainc = '-I%s' % self.modincpath

    options.update({
        # -Vaxlib : older options
        'FREE': '-fpp -free',
        # SAFE = -assume protect_parens -fp-model precise -ftz
        # causes problems, so don't use this
        'SAFE': '-assume protect_parens -no-unroll-aggressive',
        'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc,
        'LDFLAGS': '$(INCFLAGS) ',
        'OBJECTS_ARCHITECTURE': 'machine_intel.o',
    })

    options['DFLAGS'] += ' -D__INTEL'
    options['FCFLAGSOPT'] += ' $(INCFLAGS) -heap-arrays 64'
    options['FCFLAGSOPT2'] += ' $(INCFLAGS) -heap-arrays 64'

    ifortver = LooseVersion(get_software_version('ifort'))

    # Required due to memory leak that occurs if high optimizations are used (from CP2K 7.1 intel-popt-makefile)
    if ifortver >= LooseVersion("2018.5"):
        # per-object make rule that downgrades -O2 to -O0 for this one source file
        self.make_instructions += "mp2_optimize_ri_basis.o: mp2_optimize_ri_basis.F\n" \
                                  "\t$(FC) -c $(subst O2,O0,$(FCFLAGSOPT)) $<\n"
        self.log.info("Optimization level of mp2_optimize_ri_basis.F was decreased to '-O0'")

    # RHEL8 intel/2020a lots of CPASSERT failed (due to high optimization in cholesky decomposition)
    if ifortver >= LooseVersion("2019"):
        self.make_instructions += "cp_fm_cholesky.o: cp_fm_cholesky.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
        self.log.info("Optimization flags for cp_fm_cholesky.F is set to '%s'", options['FCFLAGSOPT2'])

    # -i-static has been deprecated prior to 2013, but was still usable. From 2015 it is not.
    if ifortver < LooseVersion("2013"):
        options['LDFLAGS'] += ' -i-static '
    else:
        options['LDFLAGS'] += ' -static-intel '

    # Otherwise it fails on linking, since there are 2 definitions of main
    if LooseVersion(self.version) >= LooseVersion('4.1'):
        options['LDFLAGS'] += ' -nofor-main '

    # %%s stays literal here; EasyBuildError fills it in with the version strings below
    failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl

    if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"):
        # don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL)
        if ifortver >= LooseVersion("2011.8"):
            # add additional make instructions to Makefile
            self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
        else:
            raise EasyBuildError(failmsg, "v12", "v2011.8")
    elif ifortver >= LooseVersion("11"):
        if LooseVersion(get_software_version('ifort')) >= LooseVersion("11.1.072"):
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
        else:
            raise EasyBuildError(failmsg, "v11", "v11.1.072")
    else:
        raise EasyBuildError("Intel compilers version %s not supported yet.", ifortver)

    return options
env.setvar('NWCHEM_TARGET', self.cfg['target']) env.setvar('MSG_COMMS', self.cfg['msg_comms']) env.setvar('ARMCI_NETWORK', self.cfg['armci_network']) if self.cfg['armci_network'] in ["OPENIB"]: env.setvar('IB_INCLUDE', "/usr/include") env.setvar('IB_LIB', "/usr/lib64") env.setvar('IB_LIB_NAME', "-libumad -libverbs -lpthread") if 'python' in self.cfg['modules']: python_root = get_software_root('Python') if not python_root: self.log.error( "Python module not loaded, you should add Python as a dependency." ) env.setvar('PYTHONHOME', python_root) pyver = '.'.join(get_software_version('Python').split('.')[0:2]) env.setvar('PYTHONVERSION', pyver) # if libreadline is loaded, assume it was a dependency for Python # pass -lreadline to avoid linking issues (libpython2.7.a doesn't include readline symbols) libreadline = get_software_root('libreadline') if libreadline: libreadline_libdir = os.path.join( libreadline, get_software_libdir('libreadline')) ncurses = get_software_root('ncurses') if not ncurses: self.log.error( "ncurses is not loaded, but required to link with libreadline" ) ncurses_libdir = os.path.join(ncurses, get_software_libdir('ncurses')) readline_libs = ' '.join([
def configure_intel_based(self):
    """Configure for Intel based toolchains: returns the options dict for the arch file."""

    # based on guidelines available at
    # http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/
    intelurl = ''.join(["http://software.intel.com/en-us/articles/",
                        "build-cp2k-using-intel-fortran-compiler-professional-edition/"])

    options = self.configure_common()

    extrainc = ''
    if self.modincpath:
        extrainc = '-I%s' % self.modincpath

    options.update({
        # -Vaxlib : older options
        'FREE': '-fpp -free',
        # SAFE = -assume protect_parens -fp-model precise -ftz
        # causes problems, so don't use this
        'SAFE': '-assume protect_parens -no-unroll-aggressive',
        'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc,
        'LDFLAGS': '$(INCFLAGS) -i-static',
        'OBJECTS_ARCHITECTURE': 'machine_intel.o',
    })

    options['DFLAGS'] += ' -D__INTEL'

    # only use host-specific optimization when optarch is enabled in the toolchain
    optarch = ''
    if self.toolchain.options['optarch']:
        optarch = '-xHOST'
    options['FCFLAGSOPT'] += ' $(INCFLAGS) %s -heap-arrays 64' % optarch
    options['FCFLAGSOPT2'] += ' $(INCFLAGS) %s -heap-arrays 64' % optarch

    ifortver = LooseVersion(get_software_version('ifort'))

    # %%s stays literal after the first formatting; filled in by the failmsg % (...) calls below
    failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl
    if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"):
        # don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL)
        if ifortver >= LooseVersion("2011.8"):
            # add additional make instructions to Makefile
            self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
        else:
            # NOTE(review): legacy style; self.log.error presumably raised/aborted in old
            # EasyBuild versions -- confirm before modernizing to raise EasyBuildError
            self.log.error(failmsg % ("v12", "v2011.8"))
    elif ifortver >= LooseVersion("11"):
        if LooseVersion(get_software_version('ifort')) >= LooseVersion("11.1.072"):
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
        else:
            self.log.error(failmsg % ("v11", "v11.1.072"))
    else:
        self.log.error("Intel compilers version %s not supported yet." % ifortver)

    return options
def configure_step(self):
    """Custom configure step for NAMD, we build charm++ first (if required)."""

    # complete Charm++ and NAMD architecture string with compiler family
    comp_fam = self.toolchain.comp_family()
    if self.toolchain.options['usempi']:
        # MPI builds always go through the mpicxx wrapper, regardless of compiler family
        charm_arch_comp = 'mpicxx'
    else:
        charm_arch_comps = {
            toolchain.GCC: 'gcc',
            toolchain.INTELCOMP: 'icc',
        }
        charm_arch_comp = charm_arch_comps.get(comp_fam, None)
    # NAMD's own target architecture string uses the C++ compiler name
    namd_comps = {
        toolchain.GCC: 'g++',
        toolchain.INTELCOMP: 'icc',
    }
    namd_comp = namd_comps.get(comp_fam, None)
    if charm_arch_comp is None or namd_comp is None:
        raise EasyBuildError(
            "Unknown compiler family, can't complete Charm++/NAMD target architecture."
        )
    self.cfg.update('charm_arch', charm_arch_comp)
    self.log.info("Updated 'charm_arch': %s" % self.cfg['charm_arch'])
    self.namd_arch = '%s-%s' % (self.cfg['namd_basearch'], namd_comp)
    self.log.info("Completed NAMD target architecture: %s" % self.namd_arch)

    # NAMD ships exactly one bundled Charm++ tarball; unpack it in place
    charm_tarballs = glob.glob('charm-*.tar')
    if len(charm_tarballs) != 1:
        raise EasyBuildError(
            "Expected to find exactly one tarball for Charm++, found: %s",
            charm_tarballs)
    extract_file(charm_tarballs[0], os.getcwd())

    # build Charm++ first; -DMPICH_IGNORE_CXX_SEEK avoids SEEK_* clashes with MPI C++ bindings
    tup = (self.cfg['charm_arch'], self.cfg['charm_opts'],
           self.cfg['parallel'], os.environ['CXXFLAGS'])
    cmd = "./build charm++ %s %s -j%s %s -DMPICH_IGNORE_CXX_SEEK" % tup
    # strip the '.tar' extension to derive the unpacked Charm++ subdirectory name
    charm_subdir = '.'.join(
        os.path.basename(charm_tarballs[0]).split('.')[:-1])
    self.log.debug("Building Charm++ using cmd '%s' in '%s'" %
                   (cmd, charm_subdir))
    run_cmd(cmd, path=charm_subdir)

    # compiler (options)
    self.cfg.update(
        'namd_cfg_opts',
        '--cc "%s" --cc-opts "%s"' % (os.environ['CC'], os.environ['CFLAGS']))
    self.cfg.update(
        'namd_cfg_opts', '--cxx "%s" --cxx-opts "%s"' %
        (os.environ['CXX'], os.environ['CXXFLAGS']))

    # NAMD dependencies: CUDA, FFTW
    cuda = get_software_root('CUDA')
    if cuda:
        self.cfg.update('namd_cfg_opts',
                        "--with-cuda --cuda-prefix %s" % cuda)
    fftw = get_software_root('FFTW')
    if fftw:
        if LooseVersion(get_software_version('FFTW')) >= LooseVersion('3.0'):
            if LooseVersion(self.version) >= LooseVersion('2.9'):
                self.cfg.update('namd_cfg_opts', "--with-fftw3")
            else:
                raise EasyBuildError(
                    "Using FFTW v3.x only supported in NAMD v2.9 and up.")
        else:
            self.cfg.update('namd_cfg_opts', "--with-fftw")
        self.cfg.update('namd_cfg_opts', "--fftw-prefix %s" % fftw)

    # charm_arch may contain spaces; NAMD's ./config expects the dash-joined form
    namd_charm_arch = "--charm-arch %s" % '-'.join(
        self.cfg['charm_arch'].strip().split(' '))
    cmd = "./config %s %s %s " % (self.namd_arch, namd_charm_arch,
                                  self.cfg["namd_cfg_opts"])
    run_cmd(cmd)
def configure_step(self):
    """Configure build
    - build Libint wrapper
    - generate Makefile
    """

    known_types = ['popt', 'psmp']
    if self.cfg['type'] not in known_types:
        raise EasyBuildError("Unknown build type specified: '%s', known types are %s",
                             self.cfg['type'], known_types)

    # correct start dir, if needed
    # recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir
    cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k')
    if os.path.exists(cp2k_path):
        self.cfg['start_dir'] = cp2k_path
        self.log.info("Corrected start_dir to %s" % self.cfg['start_dir'])

    # set compilers options according to toolchain config
    # full debug: -g -traceback -check all -fp-stack-check
    # -g links to mpi debug libs
    if self.toolchain.options['debug']:
        self.debug = '-g'
        self.log.info("Debug build")
    if self.toolchain.options['pic']:
        self.fpic = "-fPIC"
        self.log.info("Using fPIC")

    # report on extra flags being used
    if self.cfg['extracflags']:
        self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags'])
    if self.cfg['extradflags']:
        self.log.info("Using extra DFLAGS: %s" % self.cfg['extradflags'])

    # lib(x)smm support: libxsmm takes precedence over libsmm
    libsmm = get_software_root('libsmm')
    libxsmm = get_software_root('libxsmm')
    if libxsmm:
        self.cfg.update('extradflags', '-D__LIBXSMM')
        self.libsmm = '-lxsmm -lxsmmf'
        self.log.debug('Using libxsmm %s' % libxsmm)
    elif libsmm:
        # derive -D__HAS_smm_*nn defines from the libsmm_*nn.a libraries that are present
        libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a'))
        dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms]
        moredflags = ' ' + ' '.join(dfs)
        self.cfg.update('extradflags', moredflags)
        self.libsmm = ' '.join(libsmms)
        self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags))

    # obtain list of modinc's to use
    if self.cfg["modinc"]:
        self.modincpath = self.prepmodinc()

    # set typearch
    self.typearch = "Linux-x86-64-%s" % self.toolchain.name

    # extra make instructions
    self.make_instructions = ''  # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n"

    # compiler toolchain specific configuration
    comp_fam = self.toolchain.comp_family()
    if comp_fam == toolchain.INTELCOMP:
        options = self.configure_intel_based()
    elif comp_fam == toolchain.GCC:
        options = self.configure_GCC_based()
    else:
        raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam)

    # BLAS/LAPACK/FFTW
    if get_software_root('imkl'):
        # MKL covers BLAS, LAPACK, ScaLAPACK and FFTW interfaces in one go
        options = self.configure_MKL(options)
    else:
        # BLAS
        if get_software_root('ACML'):
            options = self.configure_ACML(options)
        else:
            options = self.configure_BLAS_lib(options)

        # FFTW (no MKL involved)
        if 'fftw3' in os.getenv('LIBFFT', ''):
            options = self.configure_FFTW3(options)

        # LAPACK
        if os.getenv('LIBLAPACK_MT', None) is not None:
            options = self.configure_LAPACK(options)

        if os.getenv('LIBSCALAPACK', None) is not None:
            options = self.configure_ScaLAPACK(options)

    # PLUMED
    plumed = get_software_root('PLUMED')
    if self.cfg['plumed'] and not plumed:
        raise EasyBuildError("The PLUMED module needs to be loaded to build CP2K with PLUMED support")

    # enable PLUMED support if PLUMED is listed as a dependency
    # and PLUMED support is either explicitly enabled (plumed = True) or unspecified ('plumed' not defined)
    if plumed and (self.cfg['plumed'] or self.cfg['plumed'] is None):
        options['LIBS'] += ' -lplumed'
        options['DFLAGS'] += ' -D__PLUMED2'

    # ELPA
    elpa = get_software_root('ELPA')
    if elpa:
        options['LIBS'] += ' -lelpa'
        elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'modules')
        options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir
        if LooseVersion(self.version) >= LooseVersion('6.1'):
            # CP2K >= 6.1 expects -D__ELPA=<major><minor> and the extra 'elpa' include dir
            elpa_ver = ''.join(get_software_version('ELPA').split('.')[:2])
            options['DFLAGS'] += ' -D__ELPA=%s' % elpa_ver
            elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'elpa')
            options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir
        else:
            options['DFLAGS'] += ' -D__ELPA3'

    # CUDA
    cuda = get_software_root('CUDA')
    if cuda:
        options['DFLAGS'] += ' -D__ACC -D__DBCSR_ACC'
        options['LIBS'] += ' -lcudart -lcublas -lcufft -lrt'
        options['NVCC'] = ' nvcc'

    # avoid group nesting: strip any existing group markers before wrapping all libs once
    options['LIBS'] = options['LIBS'].replace('-Wl,--start-group', '').replace('-Wl,--end-group', '')
    options['LIBS'] = "-Wl,--start-group %s -Wl,--end-group" % options['LIBS']

    # specify correct location for 'data' directory in final installation
    options['DATA_DIR'] = os.path.join(self.installdir, 'data')

    # create arch file using options set
    archfile = os.path.join(self.cfg['start_dir'], 'arch', '%s.%s' % (self.typearch, self.cfg['type']))
    txt = self._generate_makefile(options)
    write_file(archfile, txt)
    self.log.info("Content of makefile (%s):\n%s" % (archfile, txt))
def configure_common(self):
    """Common configuration for all toolchains.

    Verifies that an MPI-2 capable MPI library is available, assembles the base
    Makefile variables (compilers, flags, libs), and adds Libint/libxc support.

    :return: dict of Makefile variables (CC, FC, DFLAGS, LIBS, ...)
    :raises EasyBuildError: if no MPI-2 capable MPI library is found, if building
                            the libint wrapper fails, or for unknown Libint versions
    """
    # openmp introduces 2 major differences
    # -automatic is default: -noautomatic -auto-scalar
    # some mem-bandwidth optimisation
    if self.cfg['type'] == 'psmp':
        self.openmp = self.toolchain.get_flag('openmp')

    # determine which opt flags to use
    if self.cfg['typeopt']:
        optflags = 'OPT'
        regflags = 'OPT2'
    else:
        optflags = 'NOOPT'
        regflags = 'NOOPT'

    # make sure a MPI-2 able MPI lib is used
    mpi2 = False
    if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None:
        known_mpi2_fams = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2,
                           toolchain.OPENMPI, toolchain.INTELMPI]
        mpi_fam = self.toolchain.mpi_family()
        if mpi_fam in known_mpi2_fams:
            mpi2 = True
            self.log.debug("Determined MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
        else:
            self.log.debug("Cannot determine MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
    else:
        # can't use toolchain.mpi_family, because of system toolchain
        mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI', 'MPICH2', 'MPICH']
        for mpi2lib in mpi2libs:
            if get_software_root(mpi2lib):
                mpi2 = True
                # fixed: both debug messages below were missing their '%' argument,
                # so a literal '%s' was being logged
                self.log.debug("Determined MPI2 compatibility based on loaded MPI module: %s" % mpi2lib)
            else:
                self.log.debug("MPI-2 supporting MPI library %s not loaded." % mpi2lib)
    if not mpi2:
        raise EasyBuildError("CP2K needs MPI-2, no known MPI-2 supporting library loaded?")

    cppflags = os.getenv('CPPFLAGS')
    ldflags = os.getenv('LDFLAGS')
    cflags = os.getenv('CFLAGS')
    fflags = os.getenv('FFLAGS')
    # regular (non-optimized) flags: cap Fortran optimization at -O1
    fflags_lowopt = re.sub('-O[0-9]', '-O1', fflags)

    options = {
        'CC': os.getenv('MPICC'),
        'CPP': '',
        'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'AR': 'ar -r',
        'CPPFLAGS': '',
        'FPIC': self.fpic,
        'DEBUG': self.debug,
        'FCFLAGS': '$(FCFLAGS%s)' % optflags,
        'FCFLAGS2': '$(FCFLAGS%s)' % regflags,
        'CFLAGS': ' %s %s %s $(FPIC) $(DEBUG) %s ' % (cflags, cppflags, ldflags, self.cfg['extracflags']),
        'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'],
        'LIBS': os.getenv('LIBS', ''),
        'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)',
        'FCFLAGSOPT': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags,
        'FCFLAGSOPT2': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags_lowopt,
    }

    libint = get_software_root('LibInt')
    if libint:
        options['DFLAGS'] += ' -D__LIBINT'

        libintcompiler = "%s %s" % (os.getenv('CC'), os.getenv('CFLAGS'))

        # Build libint-wrapper, if required
        libint_wrapper = ''

        # required for old versions of GCC
        if not self.compilerISO_C_BINDING:
            options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING'

            # determine path for libint_tools dir
            libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools']
            libinttools_path = None
            for path in libinttools_paths:
                path = os.path.join(self.cfg['start_dir'], path)
                if os.path.isdir(path):
                    libinttools_path = path
                    change_dir(libinttools_path)
            if not libinttools_path:
                raise EasyBuildError("No libinttools dir found")

            # build libint wrapper
            cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint)
            if not run_cmd(cmd, log_all=True, simple=True):
                raise EasyBuildError("Building the libint wrapper failed")
            libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path

        # determine Libint libraries based on major version number
        libint_maj_ver = get_software_version('Libint').split('.')[0]
        if libint_maj_ver == '1':
            libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a"
        elif libint_maj_ver == '2':
            libint_libs = "$(LIBINTLIB)/libint2.a"
        else:
            raise EasyBuildError("Don't know how to handle libint version %s", libint_maj_ver)
        self.log.info("Using Libint version %s" % (libint_maj_ver))

        options['LIBINTLIB'] = '%s/lib' % libint
        options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper)

        # add Libint include dir to $FCFLAGS
        options['FCFLAGS'] += ' -I' + os.path.join(libint, 'include')
    else:
        # throw a warning, since CP2K without Libint doesn't make much sense
        self.log.warning("Libint module not loaded, so building without Libint support")

    libxc = get_software_root('libxc')
    if libxc:
        cur_libxc_version = get_software_version('libxc')
        if LooseVersion(self.version) >= LooseVersion('6.1'):
            libxc_min_version = '4.0.3'
            options['DFLAGS'] += ' -D__LIBXC'
        else:
            libxc_min_version = '2.0.1'
            options['DFLAGS'] += ' -D__LIBXC2'

        if LooseVersion(cur_libxc_version) < LooseVersion(libxc_min_version):
            raise EasyBuildError("This version of CP2K is not compatible with libxc < %s" % libxc_min_version)

        if LooseVersion(cur_libxc_version) >= LooseVersion('4.0.3'):
            # cfr. https://www.cp2k.org/howto:compile#k_libxc_optional_wider_choice_of_xc_functionals
            options['LIBS'] += ' -L%s/lib -lxcf03 -lxc' % libxc
        elif LooseVersion(cur_libxc_version) >= LooseVersion('2.2'):
            options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc
        else:
            options['LIBS'] += ' -L%s/lib -lxc' % libxc
        self.log.info("Using Libxc-%s" % cur_libxc_version)
    else:
        self.log.info("libxc module not loaded, so building without libxc support")

    return options
def configure_step(self):
    """
    Configure build by creating tools/build/user.settings from configure options.
    """
    # construct build options
    defines = ['NDEBUG']
    self.cfg.update('buildopts', "mode=release")

    # prefer the sequential C compiler (MPI builds set CC to the wrapper)
    self.cxx = os.getenv('CC_SEQ')
    if self.cxx is None:
        self.cxx = os.getenv('CC')
    cxx_ver = None
    if self.toolchain.comp_family() in [toolchain.GCC]:  # @UndefinedVariable
        cxx_ver = '.'.join(get_software_version('GCC').split('.')[:2])
    elif self.toolchain.comp_family() in [toolchain.INTELCOMP]:  # @UndefinedVariable
        cxx_ver = '.'.join(get_icc_version().split('.')[:2])
    else:
        raise EasyBuildError("Don't know how to determine C++ compiler version.")
    self.cfg.update('buildopts', "cxx=%s cxx_ver=%s" % (self.cxx, cxx_ver))

    if self.toolchain.options.get('usempi', None):
        self.cfg.update('buildopts', 'extras=mpi')
        # MPICH_IGNORE_CXX_SEEK avoids SEEK_* clashes between MPI C++ bindings and stdio
        defines.extend(['USEMPI', 'MPICH_IGNORE_CXX_SEEK'])

    # make sure important environment variables are passed down
    # e.g., compiler env vars for MPI wrappers
    env_vars = {}
    for (key, val) in os.environ.items():
        if key in ['I_MPI_CC', 'I_MPI_CXX', 'MPICH_CC', 'MPICH_CXX', 'OMPI_CC', 'OMPI_CXX']:
            env_vars.update({key: val})
    self.log.debug("List of extra environment variables to pass down: %s" % str(env_vars))

    # create user.settings file
    paths = os.getenv('PATH').split(':')
    ld_library_paths = os.getenv('LD_LIBRARY_PATH').split(':')
    cpaths = os.getenv('CPATH').split(':')
    flags = [str(f).strip('-') for f in self.toolchain.variables['CXXFLAGS'].copy()]

    txt = '\n'.join([
        "settings = {",
        "    'user': {",
        "        'prepends': {",
        "            'library_path': %s," % str(ld_library_paths),
        "            'include_path': %s," % str(cpaths),
        "        },",
        "        'appends': {",
        "            'program_path': %s," % str(paths),
        "            'flags': {",
        "                'compile': %s," % str(flags),
        #"                'mode': %s," % str(o_flags),
        "            },",
        "            'defines': %s," % str(defines),
        "        },",
        "        'overrides': {",
        "            'cc': '%s'," % os.getenv('CC'),
        "            'cxx': '%s'," % os.getenv('CXX'),
        "            'ENV': {",
        "                'INTEL_LICENSE_FILE': '%s'," % os.getenv('INTEL_LICENSE_FILE'),  # Intel license file
        "                'PATH': %s," % str(paths),
        "                'LD_LIBRARY_PATH': %s," % str(ld_library_paths),
    ])
    txt += '\n'
    for (key, val) in env_vars.items():
        txt += "                '%s': '%s',\n" % (key, val)
    txt += '\n'.join([
        "            },",
        "        },",
        "        'removes': {",
        "        },",
        "    },",
        "}",
    ])

    us_fp = os.path.join(self.srcdir, "tools/build/user.settings")
    try:
        self.log.debug("Creating '%s' with: %s" % (us_fp, txt))
        # fixed: use open() as a context manager rather than the Python 2-only file()
        # builtin, and 'except ... as err' syntax so this also parses under Python 3
        with open(us_fp, 'w') as f:
            f.write(txt)
    except IOError as err:
        raise EasyBuildError("Failed to write settings file %s: %s", us_fp, err)
def configure_step(self):
    """
    Configure VMD for building.
    """
    # make sure required dependencies are available
    deps = {}
    for dep in ['FLTK', 'Mesa', 'netCDF', 'Python', 'Tcl', 'Tk']:
        deps[dep] = get_software_root(dep)
        if deps[dep] is None:
            raise EasyBuildError("Required dependency %s is missing", dep)
    # optional dependencies
    for dep in ['ACTC', 'CUDA', 'OptiX']:
        deps[dep] = get_software_root(dep)

    # specify Tcl/Tk locations & libraries
    tclinc = os.path.join(deps['Tcl'], 'include')
    tcllib = os.path.join(deps['Tcl'], 'lib')
    env.setvar('TCL_INCLUDE_DIR', tclinc)
    env.setvar('TCL_LIBRARY_DIR', tcllib)
    env.setvar('TK_INCLUDE_DIR', os.path.join(deps['Tk'], 'include'))
    env.setvar('TK_LIBRARY_DIR', os.path.join(deps['Tk'], 'lib'))
    # link against the versioned Tcl library (e.g. -ltcl8.6)
    tclshortver = '.'.join(get_software_version('Tcl').split('.')[:2])
    self.cfg.update('buildopts', 'TCLLDFLAGS="-ltcl%s"' % tclshortver)

    # Netcdf locations
    netcdfinc = os.path.join(deps['netCDF'], 'include')
    netcdflib = os.path.join(deps['netCDF'], 'lib')

    # Python locations
    pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
    env.setvar('PYTHON_INCLUDE_DIR', os.path.join(deps['Python'], 'include/python%s' % pyshortver))
    pylibdir = det_pylibdir()
    python_libdir = os.path.join(deps['Python'], os.path.dirname(pylibdir))
    env.setvar('PYTHON_LIBRARY_DIR', python_libdir)

    # numpy include location, easiest way to determine it is via numpy.get_include()
    # fixed: use print() function syntax so this works with both Python 2 and Python 3
    out, ec = run_cmd("python -c 'import numpy; print(numpy.get_include())'", simple=False)
    if ec:
        raise EasyBuildError("Failed to determine numpy include directory: %s", out)
    else:
        env.setvar('NUMPY_INCLUDE_DIR', out.strip())

    # compiler commands
    self.cfg.update('buildopts', 'CC="%s"' % os.getenv('CC'))
    self.cfg.update('buildopts', 'CCPP="%s"' % os.getenv('CXX'))

    # plugins need to be built first (see http://www.ks.uiuc.edu/Research/vmd/doxygen/compiling.html)
    change_dir(os.path.join(self.builddir, 'plugins'))
    cmd = ' '.join([
        'make',
        'LINUXAMD64',
        "TCLINC='-I%s'" % tclinc,
        "TCLLIB='-L%s'" % tcllib,
        "TCLLDFLAGS='-ltcl%s'" % tclshortver,
        "NETCDFINC='-I%s'" % netcdfinc,
        "NETCDFLIB='-L%s'" % netcdflib,
        self.cfg['buildopts'],
    ])
    run_cmd(cmd, log_all=True, simple=False)

    # create plugins distribution
    plugindir = os.path.join(self.vmddir, 'plugins')
    env.setvar('PLUGINDIR', plugindir)
    self.log.info("Generating VMD plugins in %s", plugindir)
    run_cmd("make distrib %s" % self.cfg['buildopts'], log_all=True, simple=False)

    # explicitly mention whether or not we're building with CUDA/OptiX support
    if deps['CUDA']:
        self.log.info("Building with CUDA %s support", get_software_version('CUDA'))
        if deps['OptiX']:
            self.log.info("Building with Nvidia OptiX %s support", get_software_version('OptiX'))
        else:
            self.log.warn("Not building with Nvidia OptiX support!")
    else:
        self.log.warn("Not building with CUDA nor OptiX support!")

    # see http://www.ks.uiuc.edu/Research/vmd/doxygen/configure.html
    # LINUXAMD64: Linux 64-bit
    # LP64: build VMD as 64-bit binary
    # IMD: enable support for Interactive Molecular Dynamics (e.g. to connect to NAMD for remote simulations)
    # PTHREADS: enable support for POSIX threads
    # COLVARS: enable support for collective variables (related to NAMD/LAMMPS)
    # NOSILENT: verbose build command
    # FLTK: enable the standard FLTK GUI
    # TK: enable TK to support extension GUI elements
    # OPENGL: enable OpenGL
    self.cfg.update(
        'configopts',
        "LINUXAMD64 LP64 IMD PTHREADS COLVARS NOSILENT FLTK TK OPENGL",
        allow_duplicate=False)

    # add additional configopts based on available dependencies
    for key in deps:
        if deps[key]:
            if key == 'Mesa':
                self.cfg.update('configopts', "OPENGL MESA", allow_duplicate=False)
            elif key == 'OptiX':
                self.cfg.update('configopts', "LIBOPTIX", allow_duplicate=False)
            elif key == 'Python':
                self.cfg.update('configopts', "PYTHON NUMPY", allow_duplicate=False)
            else:
                self.cfg.update('configopts', key.upper(), allow_duplicate=False)

    # configure for building with Intel compilers specifically
    if self.toolchain.comp_family() == toolchain.INTELCOMP:
        self.cfg.update('configopts', 'ICC', allow_duplicate=False)

    # specify install location using environment variables
    env.setvar('VMDINSTALLBINDIR', os.path.join(self.installdir, 'bin'))
    env.setvar('VMDINSTALLLIBRARYDIR', os.path.join(self.installdir, 'lib'))

    # configure in vmd-<version> directory
    change_dir(self.vmddir)
    run_cmd("%s ./configure %s" % (self.cfg['preconfigopts'], self.cfg['configopts']))

    # change to 'src' subdirectory, ready for building
    change_dir(os.path.join(self.vmddir, 'src'))