def _simulated_load_dependency_module(self, name, version, metadata, verbose=False):
    """
    Set environment variables picked up by utility functions for dependencies specified as external modules.

    @param name: software name
    @param version: software version
    @param metadata: dictionary with software metadata ('prefix' for software installation prefix)
    """
    self.log.debug("Defining $EB* environment variables for software named %s", name)

    # define $EBROOT env var for install prefix, picked up by get_software_root
    prefix = metadata.get('prefix')
    if prefix is not None:
        if prefix in os.environ:
            val = os.environ[prefix]
            self.log.debug("Using value of $%s as prefix for software named %s: %s", prefix, name, val)
        else:
            val = prefix
            self.log.debug("Using specified prefix for software named %s: %s", name, val)
        setvar(get_software_root_env_var_name(name), val, verbose=verbose)

    # define $EBVERSION env var for software version, picked up by get_software_version
    if version is not None:
        setvar(get_software_version_env_var_name(name), version, verbose=verbose)
def correct_mpich_build_env(self):
    """
    Method to correctly set the environment for MPICH and derivatives
    """
    env_vars = ["CFLAGS", "CPPFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS", "LDFLAGS", "LIBS"]
    vars_to_unset = ["F90", "F90FLAGS"]

    for envvar in env_vars:
        envvar_val = os.getenv(envvar)
        if envvar_val:
            new_envvar = "MPICHLIB_%s" % envvar
            new_envvar_val = os.getenv(new_envvar)
            vars_to_unset.append(envvar)
            if envvar_val == new_envvar_val:
                self.log.debug("$%s == $%s, just defined $%s as empty", envvar, new_envvar, envvar)
            elif new_envvar_val is None:
                env.setvar(new_envvar, envvar_val)
            else:
                raise EasyBuildError(
                    "Both $%s and $%s set, can I overwrite $%s with $%s (%s) ?",
                    envvar, new_envvar, new_envvar, envvar, envvar_val,
                )
    env.unset_env_vars(vars_to_unset)
def configure_step(self): """Configure build: set config options and configure""" if LooseVersion(self.version) < LooseVersion("4.3"): self.cfg.update('configopts', "--enable-shared") if self.toolchain.options['pic']: self.cfg.update('configopts', '--with-pic') # tup = (os.getenv('FFLAGS'), os.getenv('MPICC'), os.getenv('F90')) tup = (os.getenv('FFLAGS'), os.getenv('CC'), os.getenv('F90')) self.cfg.update('configopts', 'FCFLAGS="%s" CC="%s" FC="%s"' % tup) # add -DgFortran to CPPFLAGS when building with GCC if self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable self.cfg.update('configopts', 'CPPFLAGS="%s -DgFortran"' % os.getenv('CPPFLAGS')) ConfigureMake.configure_step(self) else: hdf5 = get_software_root('HDF5') if hdf5: env.setvar('HDF5_ROOT', hdf5) CMakeMake.configure_step(self)
def test_cases_step(self):
    """Run test cases, if specified."""
    for test in self.cfg['tests']:

        # check expected format
        if not len(test) == 4:
            raise EasyBuildError("WIEN2k test case not specified in expected format: "
                                 "(testcase_name, init_lapw_args, run_lapw_args, [scf_regexp_pattern])")
        test_name = test[0]
        init_args = test[1]
        run_args = test[2]
        scf_regexp_patterns = test[3]

        try:
            cwd = os.getcwd()
            # WIEN2k enforces that working dir has same name as test case
            tmpdir = os.path.join(tempfile.mkdtemp(), test_name)

            scratch = os.path.join(tmpdir, 'scratch')
            mkdir(scratch, parents=True)
            env.setvar('SCRATCH', scratch)

            os.chdir(tmpdir)
            self.log.info("Running test case %s in %s" % (test_name, tmpdir))
        except OSError as err:
            raise EasyBuildError("Failed to create temporary directory for test %s: %s", test_name, err)

        # try and find struct file for test
        test_fp = self.obtain_file("%s.struct" % test_name)

        try:
            shutil.copy2(test_fp, tmpdir)
        except OSError as err:
            raise EasyBuildError("Failed to copy %s: %s", test_fp, err)

        # run test
        cmd = "init_lapw %s" % init_args
        run_cmd(cmd, log_all=True, simple=True)

        cmd = "run_lapw %s" % run_args
        run_cmd(cmd, log_all=True, simple=True)

        # check output
        scf_fn = "%s.scf" % test_name
        self.log.debug("Checking output of test %s in %s" % (str(test), scf_fn))
        scftxt = read_file(scf_fn)
        for regexp_pat in scf_regexp_patterns:
            regexp = re.compile(regexp_pat, re.M)
            if not regexp.search(scftxt):
                raise EasyBuildError("Failed to find pattern %s in %s", regexp.pattern, scf_fn)
            else:
                self.log.debug("Found pattern %s in %s" % (regexp.pattern, scf_fn))

        # cleanup
        try:
            os.chdir(cwd)
            rmtree2(tmpdir)
        except OSError as err:
            raise EasyBuildError("Failed to clean up temporary test dir: %s", err)
def install_step(self): """Custom install procedure for VSC-tools.""" args = "install --prefix=%(path)s --install-lib=%(path)s/lib" % {'path': self.installdir} pylibdir = os.path.join(self.installdir, 'lib') env.setvar('PYTHONPATH', '%s:%s' % (pylibdir, os.getenv('PYTHONPATH'))) try: os.mkdir(pylibdir) pwd = os.getcwd() pkg_list = ['-'.join(src['name'].split('-')[0:-1]) for src in self.src if src['name'].startswith('vsc')] for pkg in pkg_list: os.chdir(self.builddir) sel_dirs = [d for d in glob.glob("%s-[0-9][0-9.]*" % pkg)] if not len(sel_dirs) == 1: self.log.error("Found none or more than one %s dir in %s: %s" % (pkg, self.builddir, sel_dirs)) os.chdir(os.path.join(self.builddir, sel_dirs[0])) cmd = "python setup.py %s" % args run_cmd(cmd, log_all=True, simple=True, log_output=True) os.chdir(pwd) except OSError, err: self.log.error("Failed to install: %s" % err)
def sanity_check_step(self): """Custom sanity check for TensorFlow.""" custom_paths = { 'files': ['bin/tensorboard'], 'dirs': [self.pylibdir], } custom_commands = [ "%s -c 'import tensorflow'" % self.python_cmd, # tf_should_use importsweakref.finalize, which requires backports.weakref for Python < 3.4 "%s -c 'from tensorflow.python.util import tf_should_use'" % self.python_cmd, ] res = super(EB_TensorFlow, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) # determine top-level directory # start_dir is not set when TensorFlow is installed as an extension, then fall back to ext_dir topdir = self.start_dir or self.ext_dir # test installation using MNIST tutorial examples if self.cfg['runtest']: pythonpath = os.getenv('PYTHONPATH', '') env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, self.pylibdir), pythonpath])) for mnist_py in ['mnist_softmax.py', 'mnist_with_summaries.py']: datadir = tempfile.mkdtemp(suffix='-tf-%s-data' % os.path.splitext(mnist_py)[0]) logdir = tempfile.mkdtemp(suffix='-tf-%s-logs' % os.path.splitext(mnist_py)[0]) mnist_py = os.path.join(topdir, 'tensorflow', 'examples', 'tutorials', 'mnist', mnist_py) cmd = "%s %s --data_dir %s --log_dir %s" % (self.python_cmd, mnist_py, datadir, logdir) run_cmd(cmd, log_all=True, simple=True, log_ok=True) return res
def prepare_step(self): """Custom prepare step for IntelBase. Set up the license""" super(IntelBase, self).prepare_step() default_lic_env_var = 'INTEL_LICENSE_FILE' lic_specs, self.license_env_var = find_flexlm_license(custom_env_vars=[default_lic_env_var], lic_specs=[self.cfg['license_file']]) if lic_specs: if self.license_env_var is None: self.log.info("Using Intel license specifications from 'license_file': %s", lic_specs) self.license_env_var = default_lic_env_var else: self.log.info("Using Intel license specifications from $%s: %s", self.license_env_var, lic_specs) self.license_file = os.pathsep.join(lic_specs) env.setvar(self.license_env_var, self.license_file) # if we have multiple retained lic specs, specify to 'use a license which exists on the system' if len(lic_specs) > 1: self.log.debug("More than one license specs found, using '%s' license activation instead of '%s'", ACTIVATION_EXIST_LIC, self.cfg['license_activation']) self.cfg['license_activation'] = ACTIVATION_EXIST_LIC # $INTEL_LICENSE_FILE should always be set during installation with existing license env.setvar(default_lic_env_var, self.license_file) else: msg = "No viable license specifications found; " msg += "specify 'license_file', or define $INTEL_LICENSE_FILE or $LM_LICENSE_FILE" raise EasyBuildError(msg)
def prepare(self, *args, **kwargs):
    """Prepare to use this toolchain; define $CRAYPE_LINK_TYPE if 'dynamic' toolchain option is enabled."""
    super(CrayPECompiler, self).prepare(*args, **kwargs)

    if self.options['dynamic'] or self.options['shared']:
        self.log.debug("Enabling building of shared libs/dynamically linked executables via $CRAYPE_LINK_TYPE")
        env.setvar('CRAYPE_LINK_TYPE', 'dynamic')
def _setenv_variables(self, donotset=None):
    """Actually set the environment variables"""
    self.log.debug("_setenv_variables: setting variables: donotset=%s" % donotset)

    donotsetlist = []
    if isinstance(donotset, str):
        # TODO: more legacy code that should be using proper type
        self.log.raiseException("_setenv_variables: using commas-separated list. should be deprecated.")
        donotsetlist = donotset.split(',')
    elif isinstance(donotset, list):
        donotsetlist = donotset

    for key, val in self.vars.items():
        if key in donotsetlist:
            self.log.debug("_setenv_variables: not setting environment variable %s (value: %s)." % (key, val))
            continue

        self.log.debug("_setenv_variables: setting environment variable %s to %s" % (key, val))
        setvar(key, val)

        # also set unique named variables that can be used in Makefiles
        # - so you can have 'CFLAGS = $(EBVARCFLAGS)'
        # -- 'CLFLAGS = $(CFLAGS)' gives '*** Recursive variable `CFLAGS'
        #    references itself (eventually). Stop' error
        setvar("EBVAR%s" % key, val)
def test_cases_step(self):
    """Run test cases, if specified."""
    for test in self.cfg['tests']:

        # check expected format
        if not len(test) == 4:
            raise EasyBuildError("WIEN2k test case not specified in expected format: "
                                 "(testcase_name, init_lapw_args, run_lapw_args, [scf_regexp_pattern])")
        test_name = test[0]
        init_args = test[1]
        run_args = test[2]
        scf_regexp_patterns = test[3]

        try:
            cwd = os.getcwd()
            # WIEN2k enforces that working dir has same name as test case
            tmpdir = os.path.join(tempfile.mkdtemp(), test_name)

            scratch = os.path.join(tmpdir, 'scratch')
            mkdir(scratch, parents=True)
            env.setvar('SCRATCH', scratch)

            os.chdir(tmpdir)
            self.log.info("Running test case %s in %s" % (test_name, tmpdir))
        except OSError as err:
            raise EasyBuildError("Failed to create temporary directory for test %s: %s", test_name, err)

        # try and find struct file for test
        test_fp = self.obtain_file("%s.struct" % test_name)

        try:
            shutil.copy2(test_fp, tmpdir)
        except OSError as err:
            raise EasyBuildError("Failed to copy %s: %s", test_fp, err)
def configure_step(self): """Configure FSL build: set FSLDIR env var.""" self.fsldir = self.cfg['start_dir'] env.setvar('FSLDIR', self.fsldir) # determine FSL machine type cmd = ". %s/etc/fslconf/fslmachtype.sh" % self.fsldir (out, _) = run_cmd(cmd, log_all=True, simple=False) fslmachtype = out.strip() self.log.debug("FSL machine type: %s" % fslmachtype) # prepare config # either using matching config, or copy closest match cfgdir = os.path.join(self.fsldir, "config") try: cfgs = os.listdir(cfgdir) best_cfg = difflib.get_close_matches(fslmachtype, cfgs)[0] self.log.debug("Best matching config dir for %s is %s" % (fslmachtype, best_cfg)) if fslmachtype != best_cfg: srcdir = os.path.join(cfgdir, best_cfg) tgtdir = os.path.join(cfgdir, fslmachtype) shutil.copytree(srcdir, tgtdir) self.log.debug("Copied %s to %s" % (srcdir, tgtdir)) except OSError, err: self.log.error("Failed to copy closest matching config dir: %s" % err)
def configure_step(self): """Custom configuration procedure for pplacer.""" # install dir has to be non-existing when we start (it may be there from a previous (failed) install try: if os.path.exists(self.installdir): shutil.rmtree(self.installdir) self.log.warning("Existing install directory %s removed", self.installdir) except OSError as err: raise EasyBuildError("Failed to remove %s: %s", self.installdir, err) # configure OPAM to install pplacer dependencies env.setvar('OPAMROOT', self.installdir) opam_init_cmd = mk_opam_init_cmd() run_cmd(opam_init_cmd) run_cmd("opam repo add pplacer-deps %s/pplacer-opam-repository*/" % self.builddir) run_cmd("opam update pplacer-deps") env.setvar('OCAML_BACKEND', 'gcc') run_cmd("eval `opam config env` && cat opam-requirements.txt | xargs -t opam install -y") txt = "let version = \"v%s\"\n" % self.version write_file(os.path.join(self.builddir, 'pplacer-%s' % self.version, 'common_src', 'version.ml'), txt)
def configure_step(self): """Configure build: set require config and make options, and run configure script.""" for dep in self.known_deps: root = get_software_root(dep['name']) if root: self.cfg.update('configopts', '--with-%s=%s' % (dep['with'], root)) fcomp = 'FC="%s"' % os.getenv('F90') self.cfg.update('configopts', "--with-pic --with-pthread --enable-shared") self.cfg.update('configopts', "--enable-cxx --enable-fortran %s" % fcomp) # MPI and C++ support enabled requires --enable-unsupported, because this is untested by HDF5 # also returns False if MPI is not supported by this toolchain if self.toolchain.options.get('usempi', None): self.cfg.update('configopts', "--enable-unsupported --enable-parallel") mpich_mpi_families = [toolchain.INTELMPI, toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2] if self.toolchain.mpi_family() in mpich_mpi_families: self.cfg.update('buildopts', 'CXXFLAGS="$CXXFLAGS -DMPICH_IGNORE_CXX_SEEK"') else: self.cfg.update('configopts', "--disable-parallel") # make options self.cfg.update('buildopts', fcomp) # set RUNPARALLEL if MPI is not enabled (or not supported by this toolchain) if self.toolchain.options.get('usempi', None): env.setvar('RUNPARALLEL', 'mpirun -np \$\${NPROCS:=2}') super(EB_HDF5, self).configure_step()
def configure_step(self): """Configure: handle license file and clean home dir.""" # obtain license path try: self.license_file = self.cfg['license_file'] except: # the default should exist self.log.deprecated('No new style license_file parameter, license_file is now mandatory', '2.0') self.license_file = None if self.license_file is None: self.log.deprecated('Checking for old style license', '2.0') self.cfg.enable_templating = False lic = self.cfg['license'] # old style license is a path (type string) if isinstance(lic, License) and isinstance(lic, str): self.log.deprecated('No old style license parameter, license has to be pure License subclass', '2.0') self.license_file = lic self.cfg.enable_templating = True if self.license_file: self.log.info("Using license file %s" % self.license_file) else: self.log.error("No license file defined") # verify license path if not os.path.exists(self.license_file): self.log.error("Can't find license at %s" % self.license_file) # set INTEL_LICENSE_FILE env.setvar("INTEL_LICENSE_FILE", self.license_file) # clean home directory self.clean_home_subdir()
def configure_step(self, srcdir=None, builddir=None):
    """Configure build using cmake"""

    if builddir is not None:
        self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")

    # Set the search paths for CMake
    tc_ipaths = self.toolchain.get_variable("CPPFLAGS", list)
    tc_lpaths = self.toolchain.get_variable("LDFLAGS", list)
    cpaths = os.getenv('CPATH', '').split(os.pathsep)
    lpaths = os.getenv('LD_LIBRARY_PATH', '').split(os.pathsep)
    include_paths = os.pathsep.join(nub(tc_ipaths + cpaths))
    library_paths = os.pathsep.join(nub(tc_lpaths + lpaths))
    setvar("CMAKE_INCLUDE_PATH", include_paths)
    setvar("CMAKE_LIBRARY_PATH", library_paths)

    default_srcdir = '.'
    if self.cfg.get('separate_build_dir', False):
        objdir = os.path.join(self.builddir, 'easybuild_obj')
        try:
            os.mkdir(objdir)
            os.chdir(objdir)
        except OSError as err:
            raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
        default_srcdir = self.cfg['start_dir']
def _prepare_dependency_external_module(self, dep):
    """Set environment variables picked up by utility functions for dependencies specified as external modules."""
    mod_name = dep['full_mod_name']
    metadata = dep['external_module_metadata']
    self.log.debug("Defining $EB* environment variables for external module %s", mod_name)

    names = metadata.get('name', [])
    versions = metadata.get('version', [None] * len(names))
    self.log.debug("Metadata for external module %s: %s", mod_name, metadata)

    for name, version in zip(names, versions):
        self.log.debug("Defining $EB* environment variables for external module %s under name %s", mod_name, name)

        # define $EBROOT env var for install prefix, picked up by get_software_root
        prefix = metadata.get('prefix')
        if prefix is not None:
            if prefix in os.environ:
                val = os.environ[prefix]
                self.log.debug("Using value of $%s as prefix for external module %s: %s", prefix, mod_name, val)
            else:
                val = prefix
                self.log.debug("Using specified prefix for external module %s: %s", mod_name, val)
            setvar(get_software_root_env_var_name(name), val)

        # define $EBVERSION env var for software version, picked up by get_software_version
        if version is not None:
            setvar(get_software_version_env_var_name(name), version)
def configure_step(self): """Configure: handle license file and clean home dir.""" # obtain license path self.license = self.cfg['license'] if self.license: self.log.info("Using license %s" % self.license) else: self.log.error("No license defined") # verify license path if not os.path.exists(self.license): self.log.error("Can't find license at %s" % self.license) # set INTEL_LICENSE_FILE env.setvar("INTEL_LICENSE_FILE", self.license) # patch install scripts with randomly suffixed intel hom subdir for fn in ["install.sh", "pset/install.sh", "pset/iat/iat_install.sh", "data/install_mpi.sh", "pset/install_cc.sh", "pset/install_fc.sh"]: try: if os.path.isfile(fn): self.log.info("Patching %s with intel home subdir %s" % (fn, self.home_subdir)) for line in fileinput.input(fn, inplace=1, backup='.orig'): line = re.sub(r'(.*)(NONRPM_DB_PREFIX="\$HOME/)intel(.*)', r'\1\2%s\3' % self.home_subdir, line) line = re.sub(r'(.*)(DEFAULT_DB_PREFIX="\$\(echo ~\)/)intel(.*)', r'\1\2%s\3' % self.home_subdir, line) sys.stdout.write(line) except (OSError, IOError), err: self.log.error("Failed to modify install script %s with randomly suffixed home subdir: %s" % (fn, err))
def _simulated_load_dependency_module(self, name, version, metadata, verbose=False):
    """
    Set environment variables picked up by utility functions for dependencies specified as external modules.

    @param name: software name
    @param version: software version
    @param metadata: dictionary with software metadata ('prefix' for software installation prefix)
    """
    self.log.debug("Defining $EB* environment variables for software named %s", name)

    # define $EBROOT env var for install prefix, picked up by get_software_root
    prefix = metadata.get('prefix')
    if prefix is not None:
        # the prefix can be specified in a number of ways
        # * name of environment variable (+ optional relative path to combine it with; format: <name>/<relpath>)
        # * filepath (assumed if environment variable is not defined)
        parts = prefix.split(os.path.sep)
        env_var = parts[0]
        if env_var in os.environ:
            prefix = os.environ[env_var]
            rel_path = os.path.sep.join(parts[1:])
            if rel_path:
                prefix = os.path.join(prefix, rel_path, '')

            self.log.debug("Derived prefix for software named %s from $%s (rel path: %s): %s",
                           name, env_var, rel_path, prefix)
        else:
            self.log.debug("Using specified path as prefix for software named %s: %s", name, prefix)

        setvar(get_software_root_env_var_name(name), prefix, verbose=verbose)

    # define $EBVERSION env var for software version, picked up by get_software_version
    if version is not None:
        setvar(get_software_version_env_var_name(name), version, verbose=verbose)
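# --- Illustration (not part of the method above) ---
# A minimal, hedged sketch of how the two supported 'prefix' metadata formats above resolve
# to a value for the $EBROOT* variable; the environment variable name and paths below are
# hypothetical examples, and the logic simply mirrors the method above.
import os

def resolve_prefix(prefix):
    """Resolve a 'prefix' metadata value: env var name (+ optional relative path), or plain filepath."""
    parts = prefix.split(os.path.sep)
    env_var = parts[0]
    if env_var in os.environ:
        resolved = os.environ[env_var]
        rel_path = os.path.sep.join(parts[1:])
        if rel_path:
            resolved = os.path.join(resolved, rel_path, '')
        return resolved
    # first path component is not a defined environment variable: treat the whole value as a filepath
    return prefix

os.environ['CRAY_LIBSCI_PREFIX_DIR'] = '/opt/cray/libsci/13.0.1'  # hypothetical
print(resolve_prefix('CRAY_LIBSCI_PREFIX_DIR'))          # -> /opt/cray/libsci/13.0.1
print(resolve_prefix('CRAY_LIBSCI_PREFIX_DIR/include'))  # -> /opt/cray/libsci/13.0.1/include/
print(resolve_prefix('/opt/gcc/8.3.0'))                  # -> /opt/gcc/8.3.0 (used as-is)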
def configure_step(self): """Configure Python package build/install.""" if self.sitecfg is not None: # used by some extensions, like numpy, to find certain libs finaltxt = self.sitecfg if self.sitecfglibdir: repl = self.sitecfglibdir finaltxt = finaltxt.replace('SITECFGLIBDIR', repl) if self.sitecfgincdir: repl = self.sitecfgincdir finaltxt = finaltxt.replace('SITECFGINCDIR', repl) self.log.debug("Using %s: %s" % (self.sitecfgfn, finaltxt)) try: if os.path.exists(self.sitecfgfn): txt = open(self.sitecfgfn).read() self.log.debug("Found %s: %s" % (self.sitecfgfn, txt)) config = open(self.sitecfgfn, 'w') config.write(finaltxt) config.close() except IOError: raise EasyBuildError("Creating %s failed", self.sitecfgfn) # creates log entries for python being used, for debugging run_cmd("%s -V" % self.python_cmd, verbose=False) run_cmd("%s -c 'import sys; print(sys.executable)'" % self.python_cmd, verbose=False) # don't add user site directory to sys.path (equivalent to python -s) # see https://www.python.org/dev/peps/pep-0370/ env.setvar('PYTHONNOUSERSITE', '1', verbose=False) run_cmd("%s -c 'import sys; print(sys.path)'" % self.python_cmd, verbose=False)
def install_step(self): """Custom install procedure for EggLib: first build/install C++ library, then build Python library.""" # build/install C++ library cpp_subdir = os.path.join(self.builddir, 'egglib-cpp-%s' % self.version) try: os.chdir(cpp_subdir) except OSError as err: raise EasyBuildError("Failed to move to: %s", err) ConfigureMake.configure_step(self) ConfigureMake.build_step(self) ConfigureMake.install_step(self) # header files and libraries must be found when building Python library for varname, subdir in [('CPATH', 'include'), ('LIBRARY_PATH', 'lib')]: env.setvar(varname, '%s:%s' % (os.path.join(self.installdir, subdir), os.environ.get(varname, ''))) # build/install Python package py_subdir = os.path.join(self.builddir, 'egglib-py-%s' % self.version) try: os.chdir(py_subdir) except OSError as err: raise EasyBuildError("Failed to move to: %s", err) PythonPackage.build_step(self) self.cfg.update('installopts', "--install-lib %s" % os.path.join(self.installdir, self.pylibdir)) self.cfg.update('installopts', "--install-scripts %s" % os.path.join(self.installdir, 'bin')) PythonPackage.install_step(self)
def _setenv_variables(self, donotset=None, verbose=True):
    """Actually set the environment variables"""

    self.log.debug("_setenv_variables: setting variables: donotset=%s" % donotset)
    if self.dry_run:
        dry_run_msg("Defining build environment...\n", silent=not verbose)

    donotsetlist = []
    if isinstance(donotset, str):
        # TODO: more legacy code that should be using proper type
        raise EasyBuildError("_setenv_variables: using commas-separated list. should be deprecated.")
    elif isinstance(donotset, list):
        donotsetlist = donotset

    for key, val in sorted(self.vars.items()):
        if key in donotsetlist:
            self.log.debug("_setenv_variables: not setting environment variable %s (value: %s)." % (key, val))
            continue

        self.log.debug("_setenv_variables: setting environment variable %s to %s" % (key, val))
        setvar(key, val, verbose=verbose)

        # also set unique named variables that can be used in Makefiles
        # - so you can have 'CFLAGS = $(EBVARCFLAGS)'
        # -- 'CLFLAGS = $(CFLAGS)' gives '*** Recursive variable `CFLAGS'
        #    references itself (eventually). Stop' error
        setvar("EBVAR%s" % key, val, verbose=False)
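# --- Illustration (not part of the method above) ---
# A small sketch of why the extra EBVAR* copies set above are useful; the flag values are
# made up, and only the behaviour documented above (setvar defines the variable in the
# environment, here under both names) is assumed.
import os

os.environ['CFLAGS'] = '-O2 -ftree-vectorize'     # hypothetical toolchain value
os.environ['EBVARCFLAGS'] = os.environ['CFLAGS']  # what the loop above also defines

# a Makefile can now extend the toolchain flags without self-reference:
#     CFLAGS = $(EBVARCFLAGS) -DEXTRA
# whereas 'CFLAGS = $(CFLAGS) -DEXTRA' makes GNU make stop with a
# "Recursive variable 'CFLAGS' references itself (eventually)" error.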
def install_step(self): """Install EasyBuild packages one by one.""" # unset $PYTHONPATH to try and avoid that current EasyBuild is picked up, and ends up in easy-install.pth orig_pythonpath = os.getenv('PYTHONPATH') self.log.debug("Original $PYTHONPATH: %s", orig_pythonpath) env.setvar('PYTHONPATH', '') try: subdirs = os.listdir(self.builddir) for pkg in self.easybuild_pkgs: seldirs = [x for x in subdirs if x.startswith(pkg)] if len(seldirs) != 1: # vsc-base sources are optional, can be pulled in from PyPi when installing easybuild-framework too if pkg != 'vsc-base': raise EasyBuildError("Failed to find EasyBuild %s package (subdirs: %s, seldirs: %s)", pkg, subdirs, seldirs) else: self.log.info("Installing EasyBuild package %s" % pkg) os.chdir(os.path.join(self.builddir, seldirs[0])) super(EB_EasyBuildMeta, self).install_step() except OSError, err: raise EasyBuildError("Failed to install EasyBuild packages: %s", err)
def symlink_commands(self, paths):
    """
    Create a symlink for each command to binary/script at specified path.

    :param paths: dictionary containing one or more mappings, each one specified as a tuple:
                  (<path/to/script>, <list of commands to symlink to the script>)
    """
    symlink_dir = tempfile.mkdtemp()

    # prepend location of symlinks to $PATH
    setvar('PATH', '%s:%s' % (symlink_dir, os.getenv('PATH')))

    for (path, cmds) in paths.values():
        for cmd in cmds:
            cmd_s = os.path.join(symlink_dir, cmd)
            if not os.path.exists(cmd_s):
                try:
                    os.symlink(path, cmd_s)
                except OSError as err:
                    raise EasyBuildError("Failed to symlink %s to %s: %s", path, cmd_s, err)

            cmd_path = which(cmd)
            self.log.debug("which(%s): %s -> %s", cmd, cmd_path, os.path.realpath(cmd_path))

        self.log.info("Commands symlinked to %s via %s: %s", path, symlink_dir, ', '.join(cmds))
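# --- Illustration (not part of the method above) ---
# A hedged usage sketch for symlink_commands(); the interpreter path and command names are
# hypothetical, but each dict value follows the (path, commands) tuple shape from the docstring.
self.symlink_commands({
    'python': ('/opt/software/Python/2.7.18/bin/python', ['python', 'python2']),
    'pip': ('/opt/software/Python/2.7.18/bin/pip', ['pip', 'pip2']),
})
# 'python', 'python2', 'pip' and 'pip2' now resolve via the temporary
# symlink dir that symlink_commands() prepended to $PATH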
def configure_step(self): """Configure build: set require config and make options, and run configure script.""" # configure options for dependencies deps = [ ("Szip", "--with-szlib"), ("zlib", "--with-zlib"), ] for (dep, opt) in deps: root = get_software_root(dep) if root: self.cfg.update('configopts', '%s=%s' % (opt, root)) else: raise EasyBuildError("Dependency module %s not loaded.", dep) fcomp = 'FC="%s"' % os.getenv('F90') self.cfg.update('configopts', "--with-pic --with-pthread --enable-shared") self.cfg.update('configopts', "--enable-cxx --enable-fortran %s" % fcomp) # MPI and C++ support enabled requires --enable-unsupported, because this is untested by HDF5 # also returns False if MPI is not supported by this toolchain if self.toolchain.options.get('usempi', None): self.cfg.update('configopts', "--enable-unsupported --enable-parallel") else: self.cfg.update('configopts', "--disable-parallel") # make options self.cfg.update('buildopts', fcomp) # set RUNPARALLEL if MPI is not enabled (or not supported by this toolchain) if self.toolchain.options.get('usempi', None): env.setvar('RUNPARALLEL', 'mpirun -np \$\${NPROCS:=2}') super(EB_HDF5, self).configure_step()
def configure_step(self): """Configure: handle license file and clean home dir.""" lic_env_var = 'INTEL_LICENSE_FILE' intel_license_file = os.getenv(lic_env_var) if intel_license_file is None: self.log.debug("Env var $%s not set, trying 'license_file' easyconfig parameter..." % lic_env_var) # obtain license path try: self.license_file = self.cfg['license_file'] except: # the default should exist self.log.deprecated('No new style license_file parameter, license_file is now mandatory', '2.0') self.license_file = None if self.license_file is None: self.log.deprecated('Checking for old style license', '2.0') self.cfg.enable_templating = False lic = self.cfg['license'] # old style license is a path (type string) if isinstance(lic, License) and isinstance(lic, str): self.log.deprecated('No old style license parameter, license has to be pure License subclass', '2.0') self.license_file = lic self.cfg.enable_templating = True if self.license_file: self.log.info("Using license file %s" % self.license_file) else: self.log.error("No license file defined, consider setting $%s that will be picked up" % lic_env_var) # verify license path if not os.path.exists(self.license_file): self.log.error("%s not found, correct 'license_file' value or $%s" % (self.license_file, lic_env_var)) # set INTEL_LICENSE_FILE env.setvar(lic_env_var, self.license_file) else: # iterate through $INTEL_LICENSE_FILE until a .lic file is found for lic in intel_license_file.split(os.pathsep): if os.path.isfile(lic): self.cfg['license_file'] = lic self.license_file = lic else: lic_file = glob.glob("%s/*.lic" % lic) if lic_file is not None: continue # just pick the first .lic, if it's not correct, $INTEL_LICENSE_FILE should be adjusted instead self.cfg['license_file'] = lic_file[0] self.license_file = lic_file[0] self.log.info('Picking the first .lic file from $INTEL_LICENSE_FILE: %s' % lic_file[0]) if not self.license_file: self.log.error("Cannot find a license file in %s" % intel_license_file) self.log.info("Picking up Intel license file specification from $%s: %s" % (lic_env_var, self.license_file)) # clean home directory self.clean_home_subdir()
def test_step(self): """Custom built-in test procedure for DIRAC.""" if self.cfg['runtest']: # set up test environment # see http://diracprogram.org/doc/release-14/installation/testing.html env.setvar('DIRAC_TMPDIR', tempfile.mkdtemp(prefix='dirac-test-')) env.setvar('DIRAC_MPI_COMMAND', self.toolchain.mpi_cmd_for('', self.cfg['parallel'])) # run tests (may take a while, especially if some tests take a while to time out) self.log.info("Running tests may take a while, especially if some tests timeout (default timeout is 1500s)") cmd = "make test" out, ec = run_cmd(cmd, simple=False, log_all=False, log_ok=False) # check that majority of tests pass # some may fail due to timeout, but that's acceptable # cfr. https://groups.google.com/forum/#!msg/dirac-users/zEd5-xflBnY/OQ1pSbuX810J # over 90% of tests should pass passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M) if not passed_regex.search(out) and not self.dry_run: raise EasyBuildError("Too many failed tests; '%s' not found in test output: %s", passed_regex.pattern, out) # extract test results test_result_regex = re.compile(r'^\s*[0-9]+/[0-9]+ Test \s*#[0-9]+: .*', re.M) test_results = test_result_regex.findall(out) if test_results: self.log.info("Found %d test results: %s", len(test_results), test_results) elif self.dry_run: # dummy test result test_results = ["1/1 Test #1: dft_alda_xcfun ............................. Passed 72.29 sec"] else: raise EasyBuildError("Couldn't find *any* test results?") test_count_regex = re.compile(r'^\s*[0-9]+/([0-9]+)') res = test_count_regex.search(test_results[0]) if res: test_count = int(res.group(1)) elif self.dry_run: # a single dummy test result test_count = 1 else: raise EasyBuildError("Failed to determine total test count from %s using regex '%s'", test_results[0], test_count_regex.pattern) if len(test_results) != test_count: raise EasyBuildError("Expected to find %s test results, but found %s", test_count, len(test_results)) # check test results, only 'Passed' or 'Timeout' are acceptable outcomes faulty_tests = [] for test_result in test_results: if ' Passed ' not in test_result: self.log.warning("Found failed test: %s", test_result) if '***Timeout' not in test_result: faulty_tests.append(test_result) if faulty_tests: raise EasyBuildError("Found tests failing due to something else than timeout: %s", faulty_tests)
def __init__(self, *args, **kwargs):
    """Easyblock constructor: initialise class variables."""
    super(EB_Amber, self).__init__(*args, **kwargs)

    self.build_in_installdir = True

    self.pylibdir = None
    self.with_cuda = False
    self.with_mpi = False

    env.setvar('AMBERHOME', self.installdir)
def test_step(self): """Run the basic tests (but not necessarily the full regression tests) using make check""" # allow to escape testing by setting runtest to False if not self.cfg['runtest'] and not isinstance(self.cfg['runtest'], bool): # make very sure OMP_NUM_THREADS is set to 1, to avoid hanging GROMACS regression test env.setvar('OMP_NUM_THREADS', '1') self.cfg['runtest'] = 'check' super(EB_GROMACS, self).test_step()
def test_step(self): """Specify to running tests is done using 'make check'.""" # allow to escape testing by setting runtest to False if not self.cfg['runtest'] and not isinstance(self.cfg['runtest'], bool): self.cfg['runtest'] = 'check' # make very sure OMP_NUM_THREADS is set to 1, to avoid hanging GROMACS regression test env.setvar('OMP_NUM_THREADS', '1') super(EB_GROMACS, self).test_step()
def install_step(self): """Custom install procedure to skip selection of python package versions.""" full_pylibdir = os.path.join(self.installdir, self.pylibdir) env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH'))) try: os.mkdir(full_pylibdir) except OSError, err: # this will raise an error and not return raise EasyBuildError("Failed to install: %s", err)
def install_step(self, silent_cfg_names_map=None):
    """Actual installation
    - create silent cfg file
    - set environment parameters
    - execute command
    """
    if silent_cfg_names_map is None:
        silent_cfg_names_map = {}

    silent = '\n'.join([
        "%(activation_name)s=%(activation)s",
        "%(license_file_name)s=%(license_file)s",
        "%(install_dir_name)s=%(install_dir)s",
        "ACCEPT_EULA=accept",
        "INSTALL_MODE=NONRPM",
        "CONTINUE_WITH_OPTIONAL_ERROR=yes",
    ]) % {
        'activation_name': silent_cfg_names_map.get('activation_name', 'ACTIVATION'),
        'license_file_name': silent_cfg_names_map.get('license_file_name', 'PSET_LICENSE_FILE'),
        'install_dir_name': silent_cfg_names_map.get('install_dir_name', 'PSET_INSTALL_DIR'),
        'activation': self.cfg['license_activation'],
        'license_file': self.license_file,
        'install_dir': silent_cfg_names_map.get('install_dir', self.installdir),
    }

    # we should be already in the correct directory
    silentcfg = os.path.join(os.getcwd(), "silent.cfg")
    try:
        f = open(silentcfg, 'w')
        f.write(silent)
        f.close()
    except:
        self.log.exception("Writing silent cfg %s failed" % silent)
    self.log.debug("Contents of %s: %s" % (silentcfg, silent))

    # workaround for mktmp: create tmp dir and use it
    tmpdir = os.path.join(self.cfg['start_dir'], 'mytmpdir')
    try:
        os.makedirs(tmpdir)
    except:
        self.log.exception("Directory %s can't be created" % (tmpdir))
    tmppathopt = ''
    if self.cfg['usetmppath']:
        env.setvar('TMP_PATH', tmpdir)
        tmppathopt = "-t %s" % tmpdir

    # set some extra env variables
    env.setvar('LOCAL_INSTALL_VERBOSE', '1')
    env.setvar('VERBOSE_MODE', '1')

    env.setvar('INSTALL_PATH', self.installdir)

    # perform installation
    cmd = "./install.sh %s -s %s" % (tmppathopt, silentcfg)
    return run_cmd(cmd, log_all=True, simple=True)
def configure_step(self): """Configure WIEN2k build by patching siteconfig_lapw script and running it.""" self.cfgscript = "siteconfig_lapw" # patch config file first # toolchain-dependent values comp_answer = None if self.toolchain.comp_family( ) == toolchain.INTELCOMP: #@UndefinedVariable if LooseVersion( get_software_version("icc")) >= LooseVersion("2011"): comp_answer = 'I' # Linux (Intel ifort 12.0 compiler + mkl ) else: comp_answer = "K1" # Linux (Intel ifort 11.1 compiler + mkl ) elif self.toolchain.comp_family( ) == toolchain.GCC: #@UndefinedVariable comp_answer = 'V' # Linux (gfortran compiler + gotolib) else: raise EasyBuildError( "Failed to determine toolchain-dependent answers.") # libraries rlibs = "%s %s" % (os.getenv('LIBLAPACK_MT'), self.toolchain.get_flag('openmp')) rplibs = [os.getenv('LIBSCALAPACK_MT'), os.getenv('LIBLAPACK_MT')] fftwver = get_software_version('FFTW') if fftwver: suff = '' if LooseVersion(fftwver) >= LooseVersion("3"): suff = '3' rplibs.insert(0, "-lfftw%(suff)s_mpi -lfftw%(suff)s" % {'suff': suff}) else: rplibs.append(os.getenv('LIBFFT')) rplibs = ' '.join(rplibs) d = { 'FC': '%s %s' % (os.getenv('F90'), os.getenv('FFLAGS')), 'MPF': "%s %s" % (os.getenv('MPIF90'), os.getenv('FFLAGS')), 'CC': os.getenv('CC'), 'LDFLAGS': '$(FOPT) %s ' % os.getenv('LDFLAGS'), 'R_LIBS': rlibs, # libraries for 'real' (not 'complex') binary 'RP_LIBS': rplibs, # libraries for 'real' parallel binary 'MPIRUN': '', } for line in fileinput.input(self.cfgscript, inplace=1, backup='.orig'): # set config parameters for (k, v) in d.items(): regexp = re.compile('^([a-z0-9]+):%s:.*' % k) res = regexp.search(line) if res: # we need to exclude the lines with 'current', otherwise we break the script if not res.group(1) == "current": line = regexp.sub('\\1:%s:%s' % (k, v), line) # avoid exit code > 0 at end of configuration line = re.sub('(\s+)exit 1', '\\1exit 0', line) sys.stdout.write(line) # set correct compilers env.setvar('bin', os.getcwd()) dc = { 'COMPILERC': os.getenv('CC'), 'COMPILER': os.getenv('F90'), 'COMPILERP': os.getenv('MPIF90'), } for (k, v) in dc.items(): write_file(k, v) # configure with patched configure script self.log.debug('%s part I (configure)' % self.cfgscript) cmd = "./%s" % self.cfgscript qanda = { 'Press RETURN to continue': '', 'Your compiler:': '', 'Hit Enter to continue': '', 'Remote shell (default is ssh) =': '', 'and you need to know details about your installed mpi ..) (y/n)': 'y', 'Q to quit Selection:': 'Q', 'A Compile all programs (suggested) Q Quit Selection:': 'Q', ' Please enter the full path of the perl program: ': '', 'continue or stop (c/s)': 'c', '(like taskset -c). Enter N / your_specific_command:': 'N', } if LooseVersion(self.version) >= LooseVersion("13"): fftw_root = get_software_root('FFTW') if fftw_root: fftw_maj = get_software_version('FFTW').split('.')[0] fftw_spec = 'FFTW%s' % fftw_maj else: raise EasyBuildError("Required FFTW dependency is missing") qanda.update({ '(not updated) Selection:': comp_answer, 'Shared Memory Architecture? (y/N):': 'N', 'Set MPI_REMOTE to 0 / 1:': '0', 'You need to KNOW details about your installed MPI and FFTW ) (y/n)': 'y', 'Please specify whether you want to use FFTW3 (default) or FFTW2 (FFTW3 / FFTW2):': fftw_spec, 'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3):': fftw_root, 'is this correct? enter Y (default) or n:': 'Y', }) else: qanda.update({ 'compiler) Selection:': comp_answer, 'Shared Memory Architecture? 
(y/n):': 'n', 'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0', 'Do you have MPI and Scalapack installed and intend to run ' \ 'finegrained parallel? (This is usefull only for BIG cases ' \ '(50 atoms and more / unit cell) and you need to know details ' \ 'about your installed mpi and fftw ) (y/n)': 'y', }) no_qa = [ 'You have the following mkl libraries in %s :' % os.getenv('MKLROOT'), "%s[ \t]*.*" % os.getenv('MPIF90'), "%s[ \t]*.*" % os.getenv('F90'), "%s[ \t]*.*" % os.getenv('CC'), ".*SRC_.*", "Please enter the full path of the perl program:", ] std_qa = { r'S\s+Save and Quit[\s\n]+To change an item select option.[\s\n]+Selection:': 'S', 'Recommended setting for parallel f90 compiler: .* Current selection: Your compiler:': os.getenv('MPIF90'), } run_cmd_qa(cmd, qanda, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True) # post-configure patches parallel_options = {} parallel_options_fp = os.path.join(self.cfg['start_dir'], 'parallel_options') if self.cfg['wien_mpirun']: parallel_options.update({'WIEN_MPIRUN': self.cfg['wien_mpirun']}) if self.cfg['taskset'] is None: self.cfg['taskset'] = 'no' parallel_options.update({'TASKSET': self.cfg['taskset']}) for opt in ['use_remote', 'mpi_remote', 'wien_granularity']: parallel_options.update({opt.upper(): int(self.cfg[opt])}) write_file( parallel_options_fp, '\n'.join( ['setenv %s "%s"' % tup for tup in parallel_options.items()])) if self.cfg['remote']: if self.cfg['remote'] == 'pbsssh': extratxt = '\n'.join([ '', "set remote = pbsssh", "setenv PBSSSHENV 'LD_LIBRARY_PATH PATH'", '', ]) write_file(parallel_options_fp, extratxt, append=True) else: raise EasyBuildError("Don't know how to handle remote %s", self.cfg['remote']) self.log.debug("Patched file %s: %s", parallel_options_fp, read_file(parallel_options_fp))
def mpi_cmd_for(self, cmd, nr_ranks):
    """Construct an MPI command for the given command and number of ranks."""

    # parameter values for mpirun command
    params = {
        'nr_ranks': nr_ranks,
        'cmd': cmd,
    }

    # different known mpirun commands
    mpirun_n_cmd = "mpirun -n %(nr_ranks)d %(cmd)s"
    mpi_cmds = {
        toolchain.OPENMPI: mpirun_n_cmd,  # @UndefinedVariable
        toolchain.QLOGICMPI: "mpirun -H localhost -np %(nr_ranks)d %(cmd)s",  # @UndefinedVariable
        toolchain.INTELMPI: "mpirun %(mpdbf)s %(nodesfile)s -np %(nr_ranks)d %(cmd)s",  # @UndefinedVariable
        toolchain.MVAPICH2: mpirun_n_cmd,  # @UndefinedVariable
        toolchain.MPICH: mpirun_n_cmd,  # @UndefinedVariable
        toolchain.MPICH2: mpirun_n_cmd,  # @UndefinedVariable
    }

    mpi_family = self.mpi_family()

    # Intel MPI mpirun needs more work
    if mpi_family == toolchain.INTELMPI:  # @UndefinedVariable

        # set temporary dir for mpd
        # note: this needs to be kept *short*, to avoid mpirun failing with "socket.error: AF_UNIX path too long"
        # exact limit is unknown, but ~20 characters seems to be OK
        env.setvar('I_MPI_MPD_TMPDIR', tempfile.gettempdir())
        mpd_tmpdir = os.environ['I_MPI_MPD_TMPDIR']
        if len(mpd_tmpdir) > 20:
            self.log.warning("$I_MPI_MPD_TMPDIR should be (very) short to avoid problems: %s" % mpd_tmpdir)

        # temporary location for mpdboot and nodes files
        tmpdir = tempfile.mkdtemp(prefix='mpi_cmd_for-')

        # set PBS_ENVIRONMENT, so that --file option for mpdboot isn't stripped away
        env.setvar('PBS_ENVIRONMENT', "PBS_BATCH_MPI")

        # make sure we're always using mpd as process manager
        # only required for/picked up by Intel MPI v4.1 or higher, no harm done for others
        env.setvar('I_MPI_PROCESS_MANAGER', 'mpd')

        # create mpdboot file
        fn = os.path.join(tmpdir, 'mpdboot')
        try:
            if os.path.exists(fn):
                os.remove(fn)
            write_file(fn, "localhost ifhn=localhost")
        except OSError as err:
            raise EasyBuildError("Failed to create file %s: %s", fn, err)

        params.update({'mpdbf': "--file=%s" % fn})

        # create nodes file
        fn = os.path.join(tmpdir, 'nodes')
        try:
            if os.path.exists(fn):
                os.remove(fn)
            write_file(fn, "localhost\n" * nr_ranks)
        except OSError as err:
            raise EasyBuildError("Failed to create file %s: %s", fn, err)
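# --- Illustration (not part of the method above) ---
# A hedged usage sketch: for non-Intel MPI families, mpi_cmd_for() reduces to plain template
# substitution; the executable name and rank count below are made up. The DIRAC test step in
# this collection uses the return value the same way (env.setvar('DIRAC_MPI_COMMAND', ...)).
mpi_cmd = self.toolchain.mpi_cmd_for('./xhpl', 4)
# with an Open MPI toolchain this yields: "mpirun -n 4 ./xhpl"
run_cmd(mpi_cmd, log_all=True, simple=True)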
def get_system_libs(self):
    """
    Get list of dependencies for $TF_SYSTEM_LIBS

    Returns a tuple of lists: $TF_SYSTEM_LIBS names, include paths, library paths
    """
    dependency_mapping, python_mapping = get_system_libs_for_version(self.version)
    # Some TF dependencies require both a (usually C++) dependency and a Python package
    deps_with_python_pkg = set(tf_name for tf_name in dependency_mapping.values()
                               if tf_name in python_mapping.values())

    system_libs = []
    cpaths = []
    libpaths = []
    ignored_system_deps = []

    # Check direct dependencies
    dep_names = set(dep['name'] for dep in self.cfg.dependencies())
    for dep_name, tf_name in sorted(dependency_mapping.items(), key=lambda i: i[0].lower()):
        if dep_name in dep_names:
            if tf_name in deps_with_python_pkg:
                pkg_name = next(cur_pkg_name for cur_pkg_name, cur_tf_name in python_mapping.items()
                                if cur_tf_name == tf_name)
                # Simply ignore. Error reporting is done in the other loop
                if not self.python_pkg_exists(pkg_name):
                    continue
            system_libs.append(tf_name)
            # When using cURL (which uses the system OpenSSL), we also need to use "boringssl"
            # which essentially resolves to using OpenSSL as the API and library names are compatible
            if dep_name == 'cURL':
                system_libs.append('boringssl')
            sw_root = get_software_root(dep_name)
            # Dependency might be filtered via --filter-deps. In that case assume globally installed version
            if not sw_root:
                continue
            incpath = os.path.join(sw_root, 'include')
            if os.path.exists(incpath):
                cpaths.append(incpath)
                if dep_name == 'JsonCpp' and LooseVersion(self.version) < LooseVersion('2.3'):
                    # Need to add the install prefix or patch the sources:
                    # https://github.com/tensorflow/tensorflow/issues/42303
                    cpaths.append(sw_root)
                if dep_name == 'protobuf':
                    if LooseVersion(self.version) < LooseVersion('2.4'):
                        # Need to set INCLUDEDIR as TF wants to symlink files from there:
                        # https://github.com/tensorflow/tensorflow/issues/37835
                        env.setvar('INCLUDEDIR', incpath)
                    else:
                        env.setvar('PROTOBUF_INCLUDE_PATH', incpath)
            libpath = get_software_libdir(dep_name)
            if libpath:
                libpaths.append(os.path.join(sw_root, libpath))
        else:
            ignored_system_deps.append('%s (Dependency %s)' % (tf_name, dep_name))

    for pkg_name, tf_name in sorted(python_mapping.items(), key=lambda i: i[0].lower()):
        if self.python_pkg_exists(pkg_name):
            # If it is in deps_with_python_pkg we already added it
            if tf_name not in deps_with_python_pkg:
                system_libs.append(tf_name)
        else:
            ignored_system_deps.append('%s (Python package %s)' % (tf_name, pkg_name))

    if ignored_system_deps:
        self.log.warning('For the following $TF_SYSTEM_LIBS dependencies TensorFlow will download a copy ' +
                         'because an EB dependency was not found: \n%s\n' +
                         'EC Dependencies: %s\n' +
                         'Installed Python packages: %s\n',
                         ', '.join(ignored_system_deps),
                         ', '.join(dep_names),
                         ', '.join(self.get_installed_python_packages()))
    else:
        self.log.info("All known TensorFlow $TF_SYSTEM_LIBS dependencies resolved via EasyBuild!")

    return system_libs, cpaths, libpaths
def test_cases_step(self):
    # Make PyTorch tests not use the user home
    env.setvar('XDG_CACHE_HOME', os.path.join(self.tmpdir, '.cache'))

    super(EB_PyTorch, self).test_cases_step()
def configure_step(self): """Apply the necessary CMake config opts.""" if LooseVersion(self.version) < LooseVersion('19'): # Configuring Amber <19 is done in install step. return # CMake will search a previous install directory for Amber-compiled libs. We will therefore # manually remove the install directory prior to configuration. remove_dir(self.installdir) external_libs_list = [] mpiroot = get_software_root(self.toolchain.MPI_MODULE_NAME[0]) if mpiroot and self.toolchain.options.get('usempi', None): self.with_mpi = True self.cfg.update('configopts', '-DMPI=TRUE') if self.toolchain.options.get('openmp', None): self.cfg.update('configopts', '-DOPENMP=TRUE') cudaroot = get_software_root('CUDA') if cudaroot: self.with_cuda = True self.cfg.update('configopts', '-DCUDA=TRUE') if get_software_root('NCCL'): self.cfg.update('configopts', '-DNCCL=TRUE') external_libs_list.append('nccl') pythonroot = get_software_root('Python') if pythonroot: self.cfg.update('configopts', '-DDOWNLOAD_MINICONDA=FALSE') self.cfg.update( 'configopts', '-DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python')) self.pylibdir = det_pylibdir() pythonpath = os.environ.get('PYTHONPATH', '') env.setvar( 'PYTHONPATH', os.pathsep.join( [os.path.join(self.installdir, self.pylibdir), pythonpath])) if get_software_root('FFTW'): external_libs_list.append('fftw') if get_software_root('netCDF'): external_libs_list.append('netcdf') if get_software_root('netCDF-Fortran'): external_libs_list.append('netcdf-fortran') if get_software_root('zlib'): external_libs_list.append('zlib') if get_software_root('Boost'): external_libs_list.append('boost') if get_software_root('PnetCDF'): external_libs_list.append('pnetcdf') # Force libs for available deps (see cmake/3rdPartyTools.cmake in Amber source for list of 3rd party libs) # This provides an extra layer of checking but should already be handled by TRUST_SYSTEM_LIBS=TRUE external_libs = ";".join(external_libs_list) self.cfg.update('configopts', "-DFORCE_EXTERNAL_LIBS='%s'" % external_libs) if get_software_root('FFTW') or get_software_root('imkl'): self.cfg.update('configopts', '-DUSE_FFT=TRUE') # Set standard compile options self.cfg.update('configopts', '-DCHECK_UPDATES=FALSE') self.cfg.update('configopts', '-DAPPLY_UPDATES=FALSE') self.cfg.update('configopts', '-DTRUST_SYSTEM_LIBS=TRUE') self.cfg.update('configopts', '-DCOLOR_CMAKE_MESSAGES=FALSE') # Amber recommend running the tests from the sources, rather than putting in installation dir # due to size. We handle tests under the install step self.cfg.update('configopts', '-DINSTALL_TESTS=FALSE') self.cfg.update('configopts', '-DCOMPILER=AUTO') # configure using cmake super(EB_Amber, self).configure_step()
def build_step(self): """Custom build procedure for TensorFlow.""" # pre-create target installation directory mkdir(os.path.join(self.installdir, self.pylibdir), parents=True) # This seems to be no longer required since at least 2.0, likely also for older versions if LooseVersion(self.version) < LooseVersion('2.0'): self.patch_crosstool_files() # compose "bazel build" command with all its options... cmd = [ self.cfg['prebuildopts'], 'bazel', '--output_base=%s' % self.output_base_dir, '--install_base=%s' % self.install_base_dir, '--output_user_root=%s' % self.output_user_root_dir, 'build', ] # build with optimization enabled # cfr. https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode cmd.append('--compilation_mode=opt') # select 'opt' config section (this is *not* the same as --compilation_mode=opt!) # https://docs.bazel.build/versions/master/user-manual.html#flag--config cmd.append('--config=opt') # make Bazel print full command line + make it verbose on failures # https://docs.bazel.build/versions/master/user-manual.html#flag--subcommands # https://docs.bazel.build/versions/master/user-manual.html#flag--verbose_failures cmd.extend(['--subcommands', '--verbose_failures']) # Disable support of AWS platform via config switch introduced in 1.12.1 if LooseVersion(self.version) >= LooseVersion('1.12.1'): cmd.append('--config=noaws') # Bazel seems to not be able to handle a large amount of parallel jobs, e.g. 176 on some Power machines, # and will hang forever building the TensorFlow package. # So limit to something high but still reasonable while allowing ECs to overwrite it parallel = self.cfg['parallel'] if self.cfg['maxparallel'] is None: parallel = min(parallel, 64) cmd.append('--jobs=%s' % parallel) if self.toolchain.options.get('pic', None): cmd.append('--copt="-fPIC"') # include install location of Python packages in $PYTHONPATH, # and specify that value of $PYTHONPATH should be passed down into Bazel build environment; # this is required to make sure that Python packages included as extensions are found at build time; # see also https://github.com/tensorflow/tensorflow/issues/22395 pythonpath = os.getenv('PYTHONPATH', '') env.setvar( 'PYTHONPATH', os.pathsep.join( [os.path.join(self.installdir, self.pylibdir), pythonpath])) # Make TF find our modules. LD_LIBRARY_PATH gets automatically added by configure.py cpaths, libpaths = self.system_libs_info[1:] if cpaths: cmd.append("--action_env=CPATH='%s'" % ':'.join(cpaths)) if libpaths: cmd.append("--action_env=LIBRARY_PATH='%s'" % ':'.join(libpaths)) cmd.append('--action_env=PYTHONPATH') # Also export $EBPYTHONPREFIXES to handle the multi-deps python setup # See https://github.com/easybuilders/easybuild-easyblocks/pull/1664 if 'EBPYTHONPREFIXES' in os.environ: cmd.append('--action_env=EBPYTHONPREFIXES') # Ignore user environment for Python cmd.append('--action_env=PYTHONNOUSERSITE=1') # use same configuration for both host and target programs, which can speed up the build # only done when optarch is enabled, since this implicitely assumes that host and target platform are the same # see https://docs.bazel.build/versions/master/guide.html#configurations if self.toolchain.options.get('optarch'): cmd.append('--distinct_host_configuration=false') cmd.append(self.cfg['buildopts']) # TF 2 (final) sets this in configure if LooseVersion(self.version) < LooseVersion('2.0'): if get_software_root('CUDA'): cmd.append('--config=cuda') # if mkl-dnn is listed as a dependency it is used. 
Otherwise downloaded if with_mkl_dnn is true mkl_root = get_software_root('mkl-dnn') if mkl_root: cmd.extend(['--config=mkl']) cmd.insert(0, "export TF_MKL_DOWNLOAD=0 &&") cmd.insert(0, "export TF_MKL_ROOT=%s &&" % mkl_root) elif self.cfg['with_mkl_dnn']: # this makes TensorFlow use mkl-dnn (cfr. https://github.com/01org/mkl-dnn) cmd.extend(['--config=mkl']) cmd.insert(0, "export TF_MKL_DOWNLOAD=1 && ") # specify target of the build command as last argument cmd.append('//tensorflow/tools/pip_package:build_pip_package') run_cmd(' '.join(cmd), log_all=True, simple=True, log_ok=True) # run generated 'build_pip_package' script to build the .whl cmd = "bazel-bin/tensorflow/tools/pip_package/build_pip_package %s" % self.builddir run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def configure_step(self): """Configure numpy build by composing site.cfg contents.""" # see e.g. https://github.com/numpy/numpy/pull/2809/files self.sitecfg = '\n'.join([ "[DEFAULT]", "library_dirs = %(libs)s", "include_dirs= %(includes)s", "search_static_first=True", ]) if get_software_root("imkl"): if self.toolchain.comp_family() == toolchain.GCC: # see https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl, # section Building with GNU Compiler chain extrasiteconfig = '\n'.join([ "[mkl]", "lapack_libs = ", "mkl_libs = mkl_rt", ]) else: extrasiteconfig = '\n'.join([ "[mkl]", "lapack_libs = %(lapack)s", "mkl_libs = %(blas)s", ]) else: # [atlas] the only real alternative, even for non-ATLAS BLAS libs (e.g., OpenBLAS, ACML, ...) # using only the [blas] and [lapack] sections results in sub-optimal builds that don't provide _dotblas.so; # it does require a CBLAS interface to be available for the BLAS library being used # e.g. for ACML, the CBLAS module providing a C interface needs to be used extrasiteconfig = '\n'.join([ "[atlas]", "atlas_libs = %(lapack)s", "[lapack]", "lapack_libs = %(lapack)s", # required by scipy, that uses numpy's site.cfg ]) blas = None lapack = None fft = None if get_software_root("imkl"): # with IMKL, no spaces and use '-Wl:' # redefine 'Wl,' to 'Wl:' so that the patch file can do its job def get_libs_for_mkl(varname): """Get list of libraries as required for MKL patch file.""" libs = self.toolchain.variables['LIB%s' % varname].copy() libs.try_remove(['pthread', 'dl']) tweaks = { 'prefix': '', 'prefix_begin_end': '-Wl:', 'separator': ',', 'separator_begin_end': ',', } libs.try_function_on_element('change', kwargs=tweaks) libs.SEPARATOR = ',' return str( libs ) # str causes list concatenation and adding prefixes & separators blas = get_libs_for_mkl('BLAS_MT') lapack = get_libs_for_mkl('LAPACK_MT') fft = get_libs_for_mkl('FFT') # make sure the patch file is there # we check for a typical characteristic of a patch file that cooperates with the above # not fool-proof, but better than enforcing a particular patch filename patch_found = False patch_wl_regex = re.compile(r"replace\(':',\s*','\)") for patch in self.patches: # patches are either strings (extension) or dicts (easyblock) if isinstance(patch, dict): patch = patch['path'] if patch_wl_regex.search(read_file(patch)): patch_found = True break if not patch_found: raise EasyBuildError( "Building numpy on top of Intel MKL requires a patch to " "handle -Wl linker flags correctly, which doesn't seem to be there." ) else: # unless Intel MKL is used, $ATLAS should be set to take full control, # and to make sure a fully optimized version is built, including _dotblas.so # which is critical for decent performance of the numpy.dot (matrix dot product) function! 
env.setvar('ATLAS', '1') lapack = ', '.join([ x for x in self.toolchain.get_variable('LIBLAPACK_MT', typ=list) if x != "pthread" ]) fft = ', '.join(self.toolchain.get_variable('LIBFFT', typ=list)) libs = ':'.join(self.toolchain.get_variable('LDFLAGS', typ=list)) includes = ':'.join(self.toolchain.get_variable('CPPFLAGS', typ=list)) # CBLAS is required for ACML, because it doesn't offer a C interface to BLAS if get_software_root('ACML'): cblasroot = get_software_root('CBLAS') if cblasroot: lapack = ', '.join([lapack, "cblas"]) cblaslib = os.path.join(cblasroot, 'lib') # with numpy as extension, CBLAS might not be included in LDFLAGS because it's not part of a toolchain if cblaslib not in libs: libs = ':'.join([libs, cblaslib]) else: raise EasyBuildError( "CBLAS is required next to ACML to provide a C interface to BLAS, " "but it's not loaded.") if fft: extrasiteconfig += "\n[fftw]\nlibraries = %s" % fft suitesparseroot = get_software_root('SuiteSparse') if suitesparseroot: amddir = os.path.join(suitesparseroot, 'AMD') umfpackdir = os.path.join(suitesparseroot, 'UMFPACK') if not os.path.exists(amddir) or not os.path.exists(umfpackdir): raise EasyBuildError( "Expected SuiteSparse subdirectories are not both there: %s, %s", amddir, umfpackdir) else: extrasiteconfig += '\n'.join([ "[amd]", "library_dirs = %s" % os.path.join(amddir, 'Lib'), "include_dirs = %s" % os.path.join(amddir, 'Include'), "amd_libs = amd", "[umfpack]", "library_dirs = %s" % os.path.join(umfpackdir, 'Lib'), "include_dirs = %s" % os.path.join(umfpackdir, 'Include'), "umfpack_libs = umfpack", ]) self.sitecfg = '\n'.join([self.sitecfg, extrasiteconfig]) self.sitecfg = self.sitecfg % { 'blas': blas, 'lapack': lapack, 'libs': libs, 'includes': includes, } super(EB_numpy, self).configure_step() # check configuration (for debugging purposes) cmd = "%s setup.py config" % self.python_cmd run_cmd(cmd, log_all=True, simple=True)
def prepare_rpath_wrappers(self, rpath_filter_dirs=None, rpath_include_dirs=None):
    """
    Put RPATH wrapper script in place for compiler and linker commands

    :param rpath_filter_dirs: extra directories to include in RPATH filter (e.g. build dir, tmpdir, ...)
    :param rpath_include_dirs: extra directories to include in RPATH
    """
    self.log.experimental("Using wrapper scripts for compiler/linker commands that enforce RPATH linking")

    if get_os_type() == LINUX:
        self.log.info("Putting RPATH wrappers in place...")
    else:
        raise EasyBuildError("RPATH linking is currently only supported on Linux")

    wrapper_dir = os.path.join(tempfile.mkdtemp(), RPATH_WRAPPERS_SUBDIR)

    # must also wrap compiler commands, required e.g. for Clang ('gcc' on OS X)?
    c_comps, fortran_comps = self.compilers()

    rpath_args_py = find_eb_script('rpath_args.py')
    rpath_wrapper_template = find_eb_script('rpath_wrapper_template.sh.in')

    # prepend location of wrappers to $PATH
    setvar('PATH', '%s:%s' % (wrapper_dir, os.getenv('PATH')))

    # figure out list of patterns to use in rpath filter
    rpath_filter = build_option('rpath_filter')
    if rpath_filter is None:
        rpath_filter = ['/lib.*', '/usr.*']
        self.log.debug("No general RPATH filter specified, falling back to default: %s", rpath_filter)
    rpath_filter = ','.join(rpath_filter + ['%s.*' % d for d in rpath_filter_dirs or []])
    self.log.debug("Combined RPATH filter: '%s'", rpath_filter)

    rpath_include = ','.join(rpath_include_dirs or [])
    self.log.debug("Combined RPATH include paths: '%s'", rpath_include)

    # create wrappers
    for cmd in nub(c_comps + fortran_comps + ['ld', 'ld.gold', 'ld.bfd']):
        orig_cmd = which(cmd)

        if orig_cmd:
            # bail out early if command already is a wrapper;
            # this may occur when building extensions
            if self.is_rpath_wrapper(orig_cmd):
                self.log.info("%s already seems to be an RPATH wrapper script, not wrapping it again!", orig_cmd)
                continue

            cmd_wrapper = os.path.join(wrapper_dir, cmd)

            # make *very* sure we don't wrap around ourselves and create a fork bomb...
            if os.path.exists(cmd_wrapper) and os.path.exists(orig_cmd) and os.path.samefile(orig_cmd, cmd_wrapper):
                raise EasyBuildError("Refusing to create a fork bomb, which(%s) == %s", cmd, orig_cmd)

            # enable debug mode in wrapper script by specifying location for log file
            if build_option('debug'):
                rpath_wrapper_log = os.path.join(tempfile.gettempdir(), 'rpath_wrapper_%s.log' % cmd)
            else:
                rpath_wrapper_log = '/dev/null'

            # complete template script and put it in place
            cmd_wrapper_txt = read_file(rpath_wrapper_template) % {
                'orig_cmd': orig_cmd,
                'python': sys.executable,
                'rpath_args_py': rpath_args_py,
                'rpath_filter': rpath_filter,
                'rpath_include': rpath_include,
                'rpath_wrapper_log': rpath_wrapper_log,
            }
            write_file(cmd_wrapper, cmd_wrapper_txt)
            adjust_permissions(cmd_wrapper, stat.S_IXUSR)
            self.log.info("Wrapper script for %s: %s (log: %s)", orig_cmd, which(cmd), rpath_wrapper_log)
        else:
            self.log.debug("Not installing RPATH wrapper for non-existing command '%s'", cmd)
def build_step(self): if self.iter_idx > 0: # call standard build_step for nvptx-tools and nvptx GCC return super(EB_GCC, self).build_step() if self.stagedbuild: # make and install stage 1 build of GCC paracmd = '' if self.cfg['parallel']: paracmd = "-j %s" % self.cfg['parallel'] cmd = "%s make %s %s" % (self.cfg['prebuildopts'], paracmd, self.cfg['buildopts']) run_cmd(cmd, log_all=True, simple=True) cmd = "make install %s" % (self.cfg['installopts']) run_cmd(cmd, log_all=True, simple=True) # register built GCC as compiler to use for stage 2/3 path = "%s/bin:%s" % (self.stage1installdir, os.getenv('PATH')) env.setvar('PATH', path) ld_lib_path = "%(dir)s/lib64:%(dir)s/lib:%(val)s" % { 'dir': self.stage1installdir, 'val': os.getenv('LD_LIBRARY_PATH') } env.setvar('LD_LIBRARY_PATH', ld_lib_path) # # STAGE 2: build GMP/PPL/CLooG for stage 3 # # create dir to build GMP/PPL/CLooG in stage2dir = "stage2_stuff" stage2prefix = self.create_dir(stage2dir) # prepare directories to build GMP/PPL/CLooG stage2_info = self.prep_extra_src_dirs("stage2", target_prefix=stage2prefix) configopts = stage2_info['configopts'] # build PPL and CLooG (GMP as dependency) for lib in ["gmp"] + self.with_dirs: self.log.debug("Building %s in stage 2" % lib) if lib == "gmp" or self.cfg['with%s' % lib]: libdir = os.path.join(stage2prefix, lib) try: os.chdir(libdir) except OSError as err: raise EasyBuildError("Failed to change to %s: %s", libdir, err) if lib == "gmp": cmd = "./configure --prefix=%s " % stage2prefix cmd += "--with-pic --disable-shared --enable-cxx " # ensure generic build when 'generic' is set to True or when --optarch=GENERIC is used # non-generic build can be enforced with generic=False if --optarch=GENERIC is used optarch_generic = build_option( 'optarch') == OPTARCH_GENERIC if self.cfg['generic'] or (optarch_generic and self.cfg['generic'] is not False): cmd += "--enable-fat " elif lib == "ppl": self.pplver = LooseVersion( stage2_info['versions']['ppl']) cmd = "./configure --prefix=%s --with-pic --disable-shared " % stage2prefix # only enable C/C++ interfaces (Java interface is sometimes troublesome) cmd += "--enable-interfaces='c c++' " # enable watchdog (or not) if self.pplver <= LooseVersion("0.11"): if self.cfg['pplwatchdog']: cmd += "--enable-watchdog " else: cmd += "--disable-watchdog " elif self.cfg['pplwatchdog']: raise EasyBuildError( "Enabling PPL watchdog is only supported in PPL <= v0.11."
) # make sure GMP we just built is found cmd += "--with-gmp=%s " % stage2prefix elif lib == "isl": cmd = "./configure --prefix=%s --with-pic --disable-shared " % stage2prefix cmd += "--with-gmp=system --with-gmp-prefix=%s " % stage2prefix # ensure generic build when 'generic' is set to True or when --optarch=GENERIC is used # non-generic build can be enforced with generic=False if --optarch=GENERIC is used optarch_generic = build_option( 'optarch') == OPTARCH_GENERIC if self.cfg['generic'] or (optarch_generic and self.cfg['generic'] is not False): cmd += "--without-gcc-arch " elif lib == "cloog": self.cloogname = stage2_info['names']['cloog'] self.cloogver = LooseVersion( stage2_info['versions']['cloog']) v0_15 = LooseVersion("0.15") v0_16 = LooseVersion("0.16") cmd = "./configure --prefix=%s --with-pic --disable-shared " % stage2prefix # use ISL or PPL if self.cfg['clooguseisl']: if self.cfg['withisl']: self.log.debug("Using external ISL for CLooG") cmd += "--with-isl=system --with-isl-prefix=%s " % stage2prefix elif self.cloogver >= v0_16: self.log.debug("Using bundled ISL for CLooG") cmd += "--with-isl=bundled " else: raise EasyBuildError( "Using ISL is only supported in CLooG >= v0.16 (detected v%s).", self.cloogver) else: if self.cloogname == "cloog-ppl" and self.cloogver >= v0_15 and self.cloogver < v0_16: cmd += "--with-ppl=%s " % stage2prefix else: errormsg = "PPL only supported with CLooG-PPL v0.15.x (detected v%s)" % self.cloogver errormsg += "\nNeither PPL-based nor ISL-based CLooG can be used, so I'm out of options..." raise EasyBuildError(errormsg) # make sure GMP is found if self.cloogver >= v0_15 and self.cloogver < v0_16: cmd += "--with-gmp=%s " % stage2prefix elif self.cloogver >= v0_16: cmd += "--with-gmp=system --with-gmp-prefix=%s " % stage2prefix else: raise EasyBuildError( "Don't know how to specify location of GMP to the configure script of CLooG v%s.", self.cloogver) else: raise EasyBuildError( "Don't know how to configure for %s", lib) # configure self.run_configure_cmd(cmd) # build and 'install' cmd = "make %s" % paracmd run_cmd(cmd, log_all=True, simple=True) cmd = "make install" run_cmd(cmd, log_all=True, simple=True) if lib == "gmp": # make sure correct GMP is found libpath = os.path.join(stage2prefix, 'lib') incpath = os.path.join(stage2prefix, 'include') cppflags = os.getenv('CPPFLAGS', '') env.setvar( 'CPPFLAGS', "%s -L%s -I%s " % (cppflags, libpath, incpath)) # # STAGE 3: bootstrap build of final GCC (with PPL/CLooG support) # # create new obj dir and change into it self.create_dir("stage3_obj") # reconfigure for stage 3 build self.log.info( "Stage 2 of 3-staged build completed, continuing with stage 3 " "(with CLooG and/or PPL, ISL support enabled)...") stage3_info = self.prep_extra_src_dirs("stage3") configopts = stage3_info['configopts'] configopts += " --prefix=%(p)s --with-local-prefix=%(p)s" % { 'p': self.installdir } # enable bootstrapping for self-containment configopts += " --enable-bootstrap " # PPL config options if self.cfg['withppl']: # for PPL build and CLooG-PPL linking for lib in ["lib64", "lib"]: path = os.path.join(self.stage1installdir, lib, "libstdc++.a") if os.path.exists(path): libstdcxxpath = path break configopts += "--with-host-libstdcxx='-static-libgcc %s -lm' " % libstdcxxpath configopts += "--with-ppl=%s " % stage2prefix if self.pplver <= LooseVersion("0.11"): if self.cfg['pplwatchdog']: configopts += "--enable-watchdog " else: configopts += "--disable-watchdog " # CLooG config options if self.cfg['withcloog']: configopts += "--with-cloog=%s " % 
stage2prefix gccver = LooseVersion(self.version) if self.cfg['clooguseisl'] and self.cloogver >= LooseVersion( '0.16') and gccver < LooseVersion('4.8.0'): configopts += "--enable-cloog-backend=isl " if self.cfg['withisl']: configopts += "--with-isl=%s " % stage2prefix # configure cmd = "../configure %s %s" % (self.configopts, configopts) self.run_configure_cmd(cmd) # build with bootstrapping for self-containment if self.cfg['profiled']: self.cfg.update('buildopts', 'profiledbootstrap') else: self.cfg.update('buildopts', 'bootstrap') # call standard build_step super(EB_GCC, self).build_step()
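# Illustration, not part of the easyblock: the CLooG/PPL/GCC gating above relies entirely
# on LooseVersion ordering; a quick self-contained check of the boundaries involved.
from distutils.version import LooseVersion

assert LooseVersion('0.15.11') < LooseVersion('0.16')  # CLooG-PPL range
assert LooseVersion('0.16.1') >= LooseVersion('0.16')  # ISL-capable CLooG
assert LooseVersion('4.7.4') < LooseVersion('4.8.0')   # GCC needing --enable-cloog-backend=isl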
def build_step(self): """No separate build procedure for ALADIN (see install_step).""" pass
def configure_step(self): """ Configure for GCC build: - prepare extra source dirs (GMP, MPFR, MPC, ...) - create obj dir to build in (GCC doesn't like to be built in source dir) - add configure and make options, according to .eb spec file - decide whether or not to do a staged build (which is required to enable PPL/CLooG support) - set platform_lib based on config.guess output """ sysroot = build_option('sysroot') if sysroot: # based on changes made to GCC in Gentoo Prefix # https://gitweb.gentoo.org/repo/gentoo.git/tree/profiles/features/prefix/standalone/profile.bashrc # add --with-sysroot configure option, to instruct GCC to consider # value set for EasyBuild's --sysroot configuration option as the root filesystem of the operating system # (see https://gcc.gnu.org/install/configure.html) self.cfg.update('configopts', '--with-sysroot=%s' % sysroot) # avoid that --sysroot is passed to linker by patching value for SYSROOT_SPEC in gcc/gcc.c apply_regex_substitutions(os.path.join('gcc', 'gcc.c'), [('--sysroot=%R', '')]) # prefix dynamic linkers with sysroot # this patches lines like: # #define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux-x86-64.so.2" gcc_config_headers = glob.glob( os.path.join('gcc', 'config', '*', '*linux*.h')) regex_subs = [('(_DYNAMIC_LINKER.*[":])/lib', r'\1%s/lib' % sysroot)] for gcc_config_header in gcc_config_headers: apply_regex_substitutions(gcc_config_header, regex_subs) # self.configopts will be reused in a 3-staged build, # configopts is only used in first configure self.configopts = self.cfg['configopts'] # I) prepare extra source dirs, e.g. for GMP, MPFR, MPC (if required), so GCC can build them stage1_info = self.prep_extra_src_dirs("stage1") configopts = stage1_info['configopts'] # II) update config options # enable specified language support if self.cfg['languages']: self.configopts += " --enable-languages=%s" % ','.join( self.cfg['languages']) if self.cfg['withnvptx']: if self.iter_idx == 0: self.configopts += " --without-cuda-driver" self.configopts += " --enable-offload-targets=nvptx-none" else: # register installed GCC as compiler to use nvptx path = "%s/bin:%s" % (self.installdir, os.getenv('PATH')) env.setvar('PATH', path) ld_lib_path = "%(dir)s/lib64:%(dir)s/lib:%(val)s" % { 'dir': self.installdir, 'val': os.getenv('LD_LIBRARY_PATH') } env.setvar('LD_LIBRARY_PATH', ld_lib_path) extra_source = {1: "nvptx-tools", 2: "newlib"}[self.iter_idx] extra_source_dirs = glob.glob( os.path.join(self.builddir, '%s-*' % extra_source)) if len(extra_source_dirs) != 1: raise EasyBuildError("Failed to isolate %s source dir" % extra_source) if self.iter_idx == 1: # compile nvptx-tools change_dir(extra_source_dirs[0]) else: # self.iter_idx == 2 # compile nvptx target compiler symlink(os.path.join(extra_source_dirs[0], 'newlib'), 'newlib') self.create_dir("build-nvptx-gcc") self.cfg.update('configopts', self.configopts) self.cfg.update( 'configopts', "--with-build-time-tools=%s/nvptx-none/bin" % self.installdir) self.cfg.update('configopts', "--target=nvptx-none") host_type = self.determine_build_and_host_type()[1] self.cfg.update( 'configopts', "--enable-as-accelerator-for=%s" % host_type) self.cfg.update('configopts', "--disable-sjlj-exceptions") self.cfg.update('configopts', "--enable-newlib-io-long-long") self.cfg['configure_cmd_prefix'] = '../' return super(EB_GCC, self).configure_step() # enable building of libiberty, if desired if self.cfg['withlibiberty']: self.configopts += " --enable-install-libiberty" # enable link-time-optimization (LTO) support, if desired if 
self.cfg['withlto']: self.configopts += " --enable-lto" else: self.configopts += " --disable-lto" # configure for a release build self.configopts += " --enable-checking=release " # enable multilib: allow both 32 and 64 bit if self.cfg['multilib']: glibc_32bit = [ "glibc.i686", # Fedora, RedHat-based "glibc.ppc", # "" on Power "libc6-dev-i386", # Debian-based "gcc-c++-32bit", # OpenSuSE, SLES ] if not any([check_os_dependency(dep) for dep in glibc_32bit]): raise EasyBuildError( "Using multilib requires 32-bit glibc (install one of %s, depending on your OS)", ', '.join(glibc_32bit)) self.configopts += " --enable-multilib --with-multilib-list=m32,m64" else: self.configopts += " --disable-multilib" # build both static and dynamic libraries (???) self.configopts += " --enable-shared=yes --enable-static=yes " # use POSIX threads self.configopts += " --enable-threads=posix " # enable plugin support self.configopts += " --enable-plugins " # use GOLD as default linker if self.cfg['use_gold_linker']: self.configopts += " --enable-gold=default --enable-ld --with-plugin-ld=ld.gold" else: self.configopts += " --enable-gold --enable-ld=default" # enable bootstrap build for self-containment (unless for staged build) if not self.stagedbuild: configopts += " --enable-bootstrap" else: configopts += " --disable-bootstrap" if self.stagedbuild: # # STAGE 1: configure GCC build that will be used to build PPL/CLooG # self.log.info( "Starting with stage 1 of 3-staged build to enable CLooG and/or PPL, ISL support..." ) self.stage1installdir = os.path.join(self.builddir, 'GCC_stage1_eb') configopts += " --prefix=%(p)s --with-local-prefix=%(p)s" % { 'p': self.stage1installdir } else: # unstaged build, so just run standard configure/make/make install # set prefixes self.log.info("Performing regular GCC build...") configopts += " --prefix=%(p)s --with-local-prefix=%(p)s" % { 'p': self.installdir } # prioritize lib over lib{64,32,x32} for all architectures by overriding default MULTILIB_OSDIRNAMES config # only do this when multilib is not enabled if self.cfg['prefer_lib_subdir'] and not self.cfg['multilib']: cfgfile = 'gcc/config/i386/t-linux64' multilib_osdirnames = "MULTILIB_OSDIRNAMES = m64=../lib:../lib64 m32=../lib:../lib32 mx32=../lib:../libx32" self.log.info("Patching MULTILIB_OSDIRNAMES in %s with '%s'", cfgfile, multilib_osdirnames) write_file(cfgfile, multilib_osdirnames, append=True) elif self.cfg['multilib']: self.log.info( "Not patching MULTILIB_OSDIRNAMES since use of --enable-multilib is enabled" ) # III) create obj dir to build in, and change to it # GCC doesn't like to be built in the source dir if self.stagedbuild: objdir = self.create_dir("stage1_obj") self.stage1prefix = objdir else: objdir = self.create_dir("obj") # IV) actual configure, but not on default path cmd = "../configure %s %s" % (self.configopts, configopts) self.run_configure_cmd(cmd) self.disable_lto_mpfr_old_gcc(objdir)
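# Illustration, not part of the easyblock: the MULTILIB_OSDIRNAMES override above is a
# plain append to the makefile fragment; a minimal sketch using a temporary file as a
# stand-in for gcc/config/i386/t-linux64 in the real GCC source tree.
import os
import tempfile

cfgfile = os.path.join(tempfile.mkdtemp(), 't-linux64')  # stand-in path
with open(cfgfile, 'a') as fh:
    fh.write("MULTILIB_OSDIRNAMES = m64=../lib:../lib64 m32=../lib:../lib32 mx32=../lib:../libx32\n")
print(open(cfgfile).read())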
def configure_step(self): """Custom configure procedure for PyTorch.""" super(EB_PyTorch, self).configure_step() # Gather default options. Will be checked against (and can be overwritten by) custom_opts options = [ 'PYTORCH_BUILD_VERSION=' + self.version, 'PYTORCH_BUILD_NUMBER=1' ] # enable verbose mode when --debug is used (to show compiler commands) if build_option('debug'): options.append('VERBOSE=1') # Restrict parallelism options.append('MAX_JOBS=%s' % self.cfg['parallel']) # BLAS Interface if get_software_root('imkl'): options.append('BLAS=MKL') options.append('INTEL_MKL_DIR=$MKLROOT') elif LooseVersion(self.version) >= LooseVersion( '1.9.0') and get_software_root('BLIS'): options.append('BLAS=BLIS') options.append('BLIS_HOME=' + get_software_root('BLIS')) options.append('USE_MKLDNN_CBLAS=ON') elif get_software_root('OpenBLAS'): # This is what PyTorch defaults to if no MKL is found. # Make this explicit here to avoid it finding MKL from the system options.append('BLAS=Eigen') # Still need to set a BLAS lib to use. # Valid choices: mkl/open/goto/acml/atlas/accelerate/veclib/generic (+blis for 1.9+) options.append('WITH_BLAS=open') # Make sure this option is actually passed to CMake apply_regex_substitutions( os.path.join('tools', 'setup_helpers', 'cmake.py'), [("'BLAS',", "'BLAS', 'WITH_BLAS',")]) else: raise EasyBuildError( "Did not find a supported BLAS in dependencies. Don't know which BLAS lib to use" ) available_dependency_options = EB_PyTorch.get_dependency_options_for_version( self.version) dependency_names = set(dep['name'] for dep in self.cfg.dependencies()) not_used_dep_names = [] for enable_opt, dep_name in available_dependency_options: if dep_name is None: continue if dep_name in dependency_names: options.append(enable_opt) else: not_used_dep_names.append(dep_name) self.log.info( 'Did not enable options for the following dependencies as they are not used in the EC: %s', not_used_dep_names) # Use Infiniband by default # you can disable this by including 'USE_IBVERBS=0' in 'custom_opts' in the easyconfig file options.append('USE_IBVERBS=1') if get_software_root('CUDA'): options.append('USE_CUDA=1') cudnn_root = get_software_root('cuDNN') if cudnn_root: options.append('CUDNN_LIB_DIR=' + os.path.join(cudnn_root, 'lib64')) options.append('CUDNN_INCLUDE_DIR=' + os.path.join(cudnn_root, 'include')) nccl_root = get_software_root('NCCL') if nccl_root: options.append('USE_SYSTEM_NCCL=1') options.append('NCCL_INCLUDE_DIR=' + os.path.join(nccl_root, 'include')) # list of CUDA compute capabilities to use can be specified in two ways (where (2) overrules (1)): # (1) in the easyconfig file, via the custom cuda_compute_capabilities; # (2) in the EasyBuild configuration, via --cuda-compute-capabilities configuration option; cuda_cc = build_option('cuda_compute_capabilities' ) or self.cfg['cuda_compute_capabilities'] if not cuda_cc: raise EasyBuildError( 'List of CUDA compute capabilities must be specified, either via ' 'cuda_compute_capabilities easyconfig parameter or via ' '--cuda-compute-capabilities') self.log.info( 'Compiling with specified list of CUDA compute capabilities: %s', ', '.join(cuda_cc)) # This variable is also used at runtime (e.g. 
for tests) and if it is not set PyTorch will automatically # determine the compute capability of a GPU in the system and use that, which may fail tests if # it is too new for the nvcc version used env.setvar('TORCH_CUDA_ARCH_LIST', ';'.join(cuda_cc)) else: # Disable CUDA options.append('USE_CUDA=0') if get_cpu_architecture() == POWER: # *NNPACK is not supported on Power, disable to avoid warnings options.extend([ 'USE_NNPACK=0', 'USE_QNNPACK=0', 'USE_PYTORCH_QNNPACK=0', 'USE_XNNPACK=0' ]) # Metal is only supported on iOS, which likely doesn't work with EB, so disabled options.append('USE_METAL=0') unique_options = self.cfg['custom_opts'] for option in options: name = option.split('=')[ 0] + '=' # Include the equals sign to avoid partial matches if not any(opt.startswith(name) for opt in unique_options): unique_options.append(option) self.cfg.update('prebuildopts', ' '.join(unique_options) + ' ') self.cfg.update('preinstallopts', ' '.join(unique_options) + ' ')
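# Illustration, not part of the easyblock: the de-duplication above keys each option on
# its 'NAME=' prefix, so custom_opts from the easyconfig take precedence over computed
# defaults; a minimal self-contained equivalent of that merge.
def merge_options(custom_opts, default_opts):
    merged = list(custom_opts)
    for option in default_opts:
        name = option.split('=')[0] + '='  # include '=' to avoid partial name matches
        if not any(opt.startswith(name) for opt in merged):
            merged.append(option)
    return merged

print(merge_options(['MAX_JOBS=4'], ['MAX_JOBS=16', 'VERBOSE=1']))
# -> ['MAX_JOBS=4', 'VERBOSE=1']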
def prepare_rpath_wrappers(self, rpath_filter_dirs=None, rpath_include_dirs=None): """ Put RPATH wrapper script in place for compiler and linker commands :param rpath_filter_dirs: extra directories to include in RPATH filter (e.g. build dir, tmpdir, ...) """ if get_os_type() == LINUX: self.log.info("Putting RPATH wrappers in place...") else: raise EasyBuildError("RPATH linking is currently only supported on Linux") if rpath_filter_dirs is None: rpath_filter_dirs = [] # always include filter for 'stubs' library directory, # cfr. https://github.com/easybuilders/easybuild-framework/issues/2683 rpath_filter_dirs.append('.*/lib(64)?/stubs/?') # directory where all wrappers will be placed wrappers_dir = os.path.join(tempfile.mkdtemp(), RPATH_WRAPPERS_SUBDIR) # must also wrap compiler commands, required e.g. for Clang ('gcc' on OS X)? c_comps, fortran_comps = self.compilers() rpath_args_py = find_eb_script('rpath_args.py') rpath_wrapper_template = find_eb_script('rpath_wrapper_template.sh.in') # figure out list of patterns to use in rpath filter rpath_filter = build_option('rpath_filter') if rpath_filter is None: rpath_filter = ['/lib.*', '/usr.*'] self.log.debug("No general RPATH filter specified, falling back to default: %s", rpath_filter) rpath_filter = ','.join(rpath_filter + ['%s.*' % d for d in rpath_filter_dirs]) self.log.debug("Combined RPATH filter: '%s'", rpath_filter) rpath_include = ','.join(rpath_include_dirs or []) self.log.debug("Combined RPATH include paths: '%s'", rpath_include) # create wrappers for cmd in nub(c_comps + fortran_comps + ['ld', 'ld.gold', 'ld.bfd']): orig_cmd = which(cmd) if orig_cmd: # bail out early if command already is a wrapper; # this may occur when building extensions if self.is_rpath_wrapper(orig_cmd): self.log.info("%s already seems to be an RPATH wrapper script, not wrapping it again!", orig_cmd) continue # determine location for this wrapper # each wrapper is placed in its own subdirectory to enable $PATH filtering per wrapper separately # avoid '+' character in directory name (for example with 'g++' command), which can cause trouble # (see https://github.com/easybuilders/easybuild-easyconfigs/issues/7339) wrapper_dir_name = '%s_wrapper' % cmd.replace('+', 'x') wrapper_dir = os.path.join(wrappers_dir, wrapper_dir_name) cmd_wrapper = os.path.join(wrapper_dir, cmd) # make *very* sure we don't wrap around ourselves and create a fork bomb... if os.path.exists(cmd_wrapper) and os.path.exists(orig_cmd) and os.path.samefile(orig_cmd, cmd_wrapper): raise EasyBuildError("Refusing to create a fork bomb, which(%s) == %s", cmd, orig_cmd) # enable debug mode in wrapper script by specifying location for log file if build_option('debug'): rpath_wrapper_log = os.path.join(tempfile.gettempdir(), 'rpath_wrapper_%s.log' % cmd) else: rpath_wrapper_log = '/dev/null' # complete template script and put it in place cmd_wrapper_txt = read_file(rpath_wrapper_template) % { 'orig_cmd': orig_cmd, 'python': sys.executable, 'rpath_args_py': rpath_args_py, 'rpath_filter': rpath_filter, 'rpath_include': rpath_include, 'rpath_wrapper_log': rpath_wrapper_log, 'wrapper_dir': wrapper_dir, } write_file(cmd_wrapper, cmd_wrapper_txt) adjust_permissions(cmd_wrapper, stat.S_IXUSR) self.log.info("Wrapper script for %s: %s (log: %s)", orig_cmd, which(cmd), rpath_wrapper_log) # prepend location of this wrapper to $PATH setvar('PATH', '%s:%s' % (wrapper_dir, os.getenv('PATH'))) else: self.log.debug("Not installing RPATH wrapper for non-existing command '%s'", cmd)
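# Illustration, not part of the easyblock: nub() (imported from easybuild.tools.utilities)
# de-duplicates the list of commands to wrap while preserving order; a minimal equivalent,
# assuming that is indeed its behaviour.
def nub_sketch(items):
    seen = []
    for item in items:
        if item not in seen:
            seen.append(item)
    return seen

print(nub_sketch(['gcc', 'g++', 'gcc', 'gfortran', 'ld']))  # -> ['gcc', 'g++', 'gfortran', 'ld']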
def configure_step(self): """ Configure VMD for building. """ # make sure required dependencies are available deps = {} for dep in ['FLTK', 'Mesa', 'netCDF', 'Python', 'Tcl', 'Tk']: deps[dep] = get_software_root(dep) if deps[dep] is None: raise EasyBuildError("Required dependency %s is missing", dep) # optional dependencies for dep in ['ACTC', 'CUDA', 'OptiX']: deps[dep] = get_software_root(dep) # specify Tcl/Tk locations & libraries tclinc = os.path.join(deps['Tcl'], 'include') tcllib = os.path.join(deps['Tcl'], 'lib') env.setvar('TCL_INCLUDE_DIR', tclinc) env.setvar('TCL_LIBRARY_DIR', tcllib) env.setvar('TK_INCLUDE_DIR', os.path.join(deps['Tk'], 'include')) env.setvar('TK_LIBRARY_DIR', os.path.join(deps['Tk'], 'lib')) tclshortver = '.'.join(get_software_version('Tcl').split('.')[:2]) self.cfg.update('buildopts', 'TCLLDFLAGS="-ltcl%s"' % tclshortver) # Netcdf locations netcdfinc = os.path.join(deps['netCDF'], 'include') netcdflib = os.path.join(deps['netCDF'], 'lib') # Python locations pyshortver = '.'.join(get_software_version('Python').split('.')[:2]) env.setvar('PYTHON_INCLUDE_DIR', os.path.join(deps['Python'], 'include/python%s' % pyshortver)) pylibdir = det_pylibdir() python_libdir = os.path.join(deps['Python'], os.path.dirname(pylibdir)) env.setvar('PYTHON_LIBRARY_DIR', python_libdir) # numpy include location, easiest way to determine it is via numpy.get_include() out, ec = run_cmd("python -c 'import numpy; print(numpy.get_include())'", simple=False) if ec: raise EasyBuildError("Failed to determine numpy include directory: %s", out) else: env.setvar('NUMPY_INCLUDE_DIR', out.strip()) # compiler commands self.cfg.update('buildopts', 'CC="%s"' % os.getenv('CC')) self.cfg.update('buildopts', 'CCPP="%s"' % os.getenv('CXX')) # plugins need to be built first (see http://www.ks.uiuc.edu/Research/vmd/doxygen/compiling.html) change_dir(os.path.join(self.builddir, 'plugins')) cmd = ' '.join([ 'make', 'LINUXAMD64', "TCLINC='-I%s'" % tclinc, "TCLLIB='-L%s'" % tcllib, "TCLLDFLAGS='-ltcl%s'" % tclshortver, "NETCDFINC='-I%s'" % netcdfinc, "NETCDFLIB='-L%s'" % netcdflib, self.cfg['buildopts'], ]) run_cmd(cmd, log_all=True, simple=False) # create plugins distribution plugindir = os.path.join(self.vmddir, 'plugins') env.setvar('PLUGINDIR', plugindir) self.log.info("Generating VMD plugins in %s", plugindir) run_cmd("make distrib %s" % self.cfg['buildopts'], log_all=True, simple=False) # explicitly mention whether or not we're building with CUDA/OptiX support if deps['CUDA']: self.log.info("Building with CUDA %s support", get_software_version('CUDA')) if deps['OptiX']: self.log.info("Building with Nvidia OptiX %s support", get_software_version('OptiX')) else: self.log.warn("Not building with Nvidia OptiX support!") else: self.log.warn("Not building with CUDA or OptiX support!") # see http://www.ks.uiuc.edu/Research/vmd/doxygen/configure.html # LINUXAMD64: Linux 64-bit # LP64: build VMD as 64-bit binary # IMD: enable support for Interactive Molecular Dynamics (e.g. 
to connect to NAMD for remote simulations) # PTHREADS: enable support for POSIX threads # COLVARS: enable support for collective variables (related to NAMD/LAMMPS) # NOSILENT: verbose build command self.cfg.update('configopts', "LINUXAMD64 LP64 IMD PTHREADS COLVARS NOSILENT", allow_duplicate=False) # add additional configopts based on available dependencies for key in deps: if deps[key]: if key == 'Mesa': self.cfg.update('configopts', "OPENGL MESA", allow_duplicate=False) elif key == 'OptiX': self.cfg.update('configopts', "LIBOPTIX", allow_duplicate=False) elif key == 'Python': self.cfg.update('configopts', "PYTHON NUMPY", allow_duplicate=False) else: self.cfg.update('configopts', key.upper(), allow_duplicate=False) # configure for building with Intel compilers specifically if self.toolchain.comp_family() == toolchain.INTELCOMP: self.cfg.update('configopts', 'ICC', allow_duplicate=False) # specify install location using environment variables env.setvar('VMDINSTALLBINDIR', os.path.join(self.installdir, 'bin')) env.setvar('VMDINSTALLLIBRARYDIR', os.path.join(self.installdir, 'lib')) # configure in vmd-<version> directory change_dir(self.vmddir) run_cmd("%s ./configure %s" % (self.cfg['preconfigopts'], self.cfg['configopts'])) # change to 'src' subdirectory, ready for building change_dir(os.path.join(self.vmddir, 'src'))
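# Illustration, not part of the easyblock: the numpy include directory probe above is done
# via run_cmd; a minimal standard-library equivalent for reference. It assumes a 'python'
# interpreter with numpy on the $PATH, and uses print() as a function so the probed
# interpreter can be either Python 2 or 3.
import subprocess

out = subprocess.check_output(['python', '-c', 'import numpy; print(numpy.get_include())'])
numpy_include_dir = out.decode('utf-8').strip()
print(numpy_include_dir)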
class EB_ALADIN(EasyBlock): """Support for building/installing ALADIN.""" def __init__(self, *args, **kwargs): """Initialisation of custom class variables for ALADIN.""" super(EB_ALADIN, self).__init__(*args, **kwargs) self.conf_file = None self.conf_filepath = None self.rootpack_dir = 'UNKNOWN' self.orig_library_path = None @staticmethod def extra_options(): """Custom easyconfig parameters for ALADIN.""" extra_vars = { 'optional_extra_param': ['default value', "short description", CUSTOM], } return EasyBlock.extra_options(extra_vars) def configure_step(self): """Custom configuration procedure for ALADIN.""" # unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking if 'LIBRARY_PATH' in os.environ: self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH']) self.orig_library_path = os.environ.pop('LIBRARY_PATH') # build auxiliary libraries auxlibs_dir = None my_gnu = None if self.toolchain.comp_family() == toolchain.GCC: my_gnu = 'y' # gfortran for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']: flags = os.getenv(var) env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags) self.log.info("Updated %s to '%s'" % (var, os.getenv(var))) elif self.toolchain.comp_family() == toolchain.INTELCOMP: my_gnu = 'i' # icc/ifort else: raise EasyBuildError("Don't know how to set 'my_gnu' variable in auxlibs build script.") self.log.info("my_gnu set to '%s'" % my_gnu) tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_') try: cwd = os.getcwd() os.chdir(self.builddir) builddirs = os.listdir(self.builddir) auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0] os.chdir(auxlibs_dir) auto_driver = 'driver_automatic' for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'): line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line) line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line) sys.stdout.write(line) run_cmd("./%s" % auto_driver) os.chdir(cwd) except OSError as err: raise EasyBuildError("Failed to build ALADIN: %s", err) # build gmkpack, update PATH and set GMKROOT # we build gmkpack here because a config file is generated in the gmkpack install path try: gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0] os.chdir(os.path.join(self.builddir, gmkpack_dir)) qa = { 'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n', } run_cmd_qa("./build_gmkpack", qa) os.chdir(cwd) paths = os.getenv('PATH').split(':') paths.append(os.path.join(self.builddir, gmkpack_dir, 'util')) env.setvar('PATH', ':'.join(paths)) env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir)) except OSError as err: raise EasyBuildError("Failed to build gmkpack: %s", err)
def configuremake_install_step(self): """Custom build, test & install procedure for Amber <20.""" # unset $LIBS since it breaks the build env.unset_env_vars(['LIBS']) # define environment variables for MPI, BLAS/LAPACK & dependencies mklroot = get_software_root('imkl') openblasroot = get_software_root('OpenBLAS') if mklroot: env.setvar('MKL_HOME', os.getenv('MKLROOT')) elif openblasroot: lapack = os.getenv('LIBLAPACK') if lapack is None: raise EasyBuildError( "LIBLAPACK (from OpenBLAS) not found in environment.") else: env.setvar('GOTO', lapack) mpiroot = get_software_root(self.toolchain.MPI_MODULE_NAME[0]) if mpiroot and self.toolchain.options.get('usempi', None): env.setvar('MPI_HOME', mpiroot) self.with_mpi = True if self.toolchain.mpi_family() == toolchain.INTELMPI: self.mpi_option = '-intelmpi' else: self.mpi_option = '-mpi' common_configopts = [self.cfg['configopts'], '--no-updates'] if get_software_root('X11') is None: common_configopts.append('-noX11') if self.name == 'Amber' and self.cfg['static']: common_configopts.append('-static') netcdfroot = get_software_root('netCDF') if netcdfroot: common_configopts.extend(["--with-netcdf", netcdfroot]) netcdf_fort_root = get_software_root('netCDF-Fortran') if netcdf_fort_root: common_configopts.extend(["--with-netcdf-fort", netcdf_fort_root]) pythonroot = get_software_root('Python') if pythonroot: common_configopts.extend( ["--with-python", os.path.join(pythonroot, 'bin', 'python')]) self.pylibdir = det_pylibdir() pythonpath = os.environ.get('PYTHONPATH', '') env.setvar( 'PYTHONPATH', os.pathsep.join( [os.path.join(self.installdir, self.pylibdir), pythonpath])) comp_fam = self.toolchain.comp_family() if comp_fam == toolchain.INTELCOMP: comp_str = 'intel' elif comp_fam == toolchain.GCC: comp_str = 'gnu' else: raise EasyBuildError( "Don't know how to compile with compiler family '%s' -- check EasyBlock?", comp_fam) # The NAB compiles need openmp flag if self.toolchain.options.get('openmp', None): env.setvar('CUSTOMBUILDFLAGS', self.toolchain.get_flag('openmp')) # compose list of build targets build_targets = [('', 'test')] if self.with_mpi: build_targets.append((self.mpi_option, 'test.parallel')) # hardcode to 4 MPI processes, minimal required to run all tests env.setvar('DO_PARALLEL', self.toolchain.mpi_cmd_for('', 4)) cudaroot = get_software_root('CUDA') if cudaroot: env.setvar('CUDA_HOME', cudaroot) self.with_cuda = True build_targets.append(('-cuda', 'test.cuda')) if self.with_mpi: build_targets.append( ("-cuda %s" % self.mpi_option, 'test.cuda_parallel')) ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '') env.setvar( 'LD_LIBRARY_PATH', os.pathsep.join( [os.path.join(self.installdir, 'lib'), ld_lib_path])) for flag, testrule in build_targets: # configure cmd = "%s ./configure %s" % (self.cfg['preconfigopts'], ' '.join(common_configopts + [flag, comp_str])) (out, _) = run_cmd(cmd, log_all=True, simple=False) # build in situ using 'make install' # note: not 'build' super(EB_Amber, self).install_step() # test if self.cfg['runtest']: run_cmd("make %s" % testrule, log_all=True, simple=False) # clean, overruling the normal 'build' run_cmd("make clean")
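# Illustration, not part of the easyblock: the configure/build/test loop above is driven
# by (configure-flag, test-target) pairs that accumulate based on which features are
# enabled; a sketch with hypothetical availability flags.
with_mpi, with_cuda = True, False  # hypothetical
mpi_option = '-mpi'                # or '-intelmpi' for Intel MPI
build_targets = [('', 'test')]
if with_mpi:
    build_targets.append((mpi_option, 'test.parallel'))
if with_cuda:
    build_targets.append(('-cuda', 'test.cuda'))
    if with_mpi:
        build_targets.append(('-cuda %s' % mpi_option, 'test.cuda_parallel'))
print(build_targets)  # -> [('', 'test'), ('-mpi', 'test.parallel')]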
class EB_PSI(CMakeMake): """ Support for building and installing PSI """ def __init__(self, *args, **kwargs): """Initialize class variables custom to PSI.""" super(EB_PSI, self).__init__(*args, **kwargs) self.psi_srcdir = None self.install_psi_objdir = None self.install_psi_srcdir = None @staticmethod def extra_options(): """Extra easyconfig parameters specific to PSI.""" extra_vars = { # always include running PSI unit tests (takes about 2h or less) 'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD], } return CMakeMake.extra_options(extra_vars) def configure_step(self): """ Configure build outside of source directory. """ try: objdir = os.path.join(self.builddir, 'obj') os.makedirs(objdir) os.chdir(objdir) except OSError as err: self.log.error("Failed to prepare for configuration of PSI build: %s" % err) env.setvar('F77FLAGS', os.getenv('F90FLAGS')) # In order to create new plugins with PSI, it needs to know the location of the source # and the obj dir after install. These env vars give that information to the configure script. self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep)) self.install_psi_objdir = os.path.join(self.installdir, 'obj') self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir) env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir) env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir) # explicitly specify Python binary to use pythonroot = get_software_root('Python') if not pythonroot: self.log.error("Python module not loaded.") # Use EB Boost boostroot = get_software_root('Boost') if not boostroot: self.log.error("Boost module not loaded.") # pre-4.0b5 versions used autotools, newer versions use CMake if LooseVersion(self.version) <= LooseVersion("4.0b5"): env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python')) env.setvar('USE_SYSTEM_BOOST', 'TRUE') if self.toolchain.options.get('usempi', None): # PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly # we should always specify the sequential Fortran compiler, # to avoid problems with -lmpi vs -lmpi_mt during linking fcompvar = 'F77_SEQ' else: fcompvar = 'F77' # update configure options # using multi-threaded BLAS/LAPACK is important for performance, # cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii opt_vars = [ ('cc', 'CC'), ('cxx', 'CXX'), ('fc', fcompvar), ('libdirs', 'LDFLAGS'), ('blas', 'LIBBLAS_MT'), ('lapack', 'LIBLAPACK_MT'), ] for (opt, var) in opt_vars: self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var))) # -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers # both define SEEK_SET, this makes the one for MPI be ignored self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS')) # specify location of Boost self.cfg.update('configopts', "--with-boost=%s" % boostroot) # enable support for plugins self.cfg.update('configopts', "--with-plugins") ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir']) else: self.cfg['configopts'] += "-DPYTHON_INTERPRETER=%s " % os.path.join(pythonroot, 'bin', 'python') self.cfg['configopts'] += "-DCMAKE_BUILD_TYPE=Release " if self.toolchain.options.get('usempi', None): self.cfg['configopts'] += "-DENABLE_MPI=ON " if get_software_root('impi'): self.cfg['configopts'] += "-DENABLE_CSR=ON -DBLAS_TYPE=MKL " CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
def configure_step(self): """Custom configuration procedure for Bazel.""" binutils_root = get_software_root('binutils') gcc_root = get_software_root('GCCcore') or get_software_root('GCC') gcc_ver = get_software_version('GCCcore') or get_software_version( 'GCC') # only patch Bazel scripts if binutils & GCC installation prefix could be determined if binutils_root and gcc_root: res = glob.glob( os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include')) if res and len(res) == 1: gcc_lib_inc = res[0] else: raise EasyBuildError( "Failed to pinpoint location of GCC include files: %s", res) gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed') if not os.path.exists(gcc_lib_inc_fixed): raise EasyBuildError("Derived directory %s does not exist", gcc_lib_inc_fixed) gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver) if not os.path.exists(gcc_cplusplus_inc): raise EasyBuildError("Derived directory %s does not exist", gcc_cplusplus_inc) # replace hardcoded paths in CROSSTOOL regex_subs = [ (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')), (r'(cxx_builtin_include_directory:.*)/usr/lib/gcc', r'\1%s' % gcc_lib_inc), (r'(cxx_builtin_include_directory:.*)/usr/local/include', r'\1%s' % gcc_lib_inc_fixed), (r'(cxx_builtin_include_directory:.*)/usr/include', r'\1%s' % gcc_cplusplus_inc), ] for tool in ['ar', 'cpp', 'dwp', 'gcc', 'ld']: path = which(tool) if path: regex_subs.append((os.path.join('/usr', 'bin', tool), path)) else: raise EasyBuildError("Failed to determine path to '%s'", tool) apply_regex_substitutions( os.path.join('tools', 'cpp', 'CROSSTOOL'), regex_subs) # replace hardcoded paths in (unix_)cc_configure.bzl regex_subs = [ (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')), (r'"/usr/bin', '"' + os.path.join(binutils_root, 'bin')), ] for conf_bzl in ['cc_configure.bzl', 'unix_cc_configure.bzl']: filepath = os.path.join('tools', 'cpp', conf_bzl) if os.path.exists(filepath): apply_regex_substitutions(filepath, regex_subs) else: self.log.info( "Not patching Bazel build scripts, installation prefix for binutils/GCC not found" ) # enable building in parallel env.setvar('EXTRA_BAZEL_ARGS', '--jobs=%d' % self.cfg['parallel'])
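# Illustration, not part of the easyblock: apply_regex_substitutions rewrites files in
# place from (pattern, replacement) pairs; a minimal stand-in using plain re on a string,
# with a hypothetical GCC include path, shows the kind of CROSSTOOL rewriting done above.
import re

line = 'cxx_builtin_include_directory: "/usr/lib/gcc"'
pattern, repl = (r'(cxx_builtin_include_directory:.*)/usr/lib/gcc',
                 r'\1/example/software/GCCcore/lib/gcc/x86_64-pc-linux-gnu/10.2.0/include')  # hypothetical
print(re.sub(pattern, repl, line))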
def configure_step(self): """Custom configuration procedure for ALADIN.""" # unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking if 'LIBRARY_PATH' in os.environ: self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH']) self.orig_library_path = os.environ.pop('LIBRARY_PATH') # build auxiliary libraries auxlibs_dir = None my_gnu = None if self.toolchain.comp_family() == toolchain.GCC: my_gnu = 'y' # gfortran for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']: flags = os.getenv(var) env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags) self.log.info("Updated %s to '%s'" % (var, os.getenv(var))) elif self.toolchain.comp_family() == toolchain.INTELCOMP: my_gnu = 'i' # icc/ifort else: raise EasyBuildError( "Don't know how to set 'my_gnu' variable in auxlibs build script." ) self.log.info("my_gnu set to '%s'" % my_gnu) tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_') try: cwd = os.getcwd() os.chdir(self.builddir) builddirs = os.listdir(self.builddir) auxlibs_dir = [ x for x in builddirs if x.startswith('auxlibs_installer') ][0] os.chdir(auxlibs_dir) auto_driver = 'driver_automatic' for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'): line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line) line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line) sys.stdout.write(line) run_cmd("./%s" % auto_driver) os.chdir(cwd) except OSError as err: raise EasyBuildError("Failed to build ALADIN: %s", err) # build gmkpack, update PATH and set GMKROOT # we build gmkpack here because a config file is generated in the gmkpack install path try: gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0] os.chdir(os.path.join(self.builddir, gmkpack_dir)) qa = { 'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n', } run_cmd_qa("./build_gmkpack", qa) os.chdir(cwd) paths = os.getenv('PATH').split(':') paths.append(os.path.join(self.builddir, gmkpack_dir, 'util')) env.setvar('PATH', ':'.join(paths)) env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir)) except OSError as err: raise EasyBuildError("Failed to build gmkpack: %s", err) # generate gmkpack configuration file self.conf_file = 'ALADIN_%s' % self.version self.conf_filepath = os.path.join(self.builddir, 'gmkpack_support', 'arch', '%s.x' % self.conf_file) try: if os.path.exists(self.conf_filepath): os.remove(self.conf_filepath) self.log.info("Removed existing gmkpack config file %s" % self.conf_filepath) archdir = os.path.dirname(self.conf_filepath) if not os.path.exists(archdir): mkdir(archdir, parents=True) except OSError as err: raise EasyBuildError("Failed to remove existing file %s: %s", self.conf_filepath, err) mpich = 'n' known_mpi_libs = [ toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI ] if self.toolchain.options.get( 'usempi', None) and self.toolchain.mpi_family() in known_mpi_libs: mpich = 'y' qpref = 'Please type the ABSOLUTE name of ' qsuff = ', or ignore (environment variables allowed) :' qsuff2 = ', or ignore : (environment variables allowed) :' comp_fam = self.toolchain.comp_family() if comp_fam == toolchain.GCC: gribdir = 'GNU' elif comp_fam == toolchain.INTELCOMP: gribdir = 'INTEL' else: raise EasyBuildError( "Don't know which grib lib dir to use for compiler %s", comp_fam) aux_lib_gribex = 
os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a') aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a') grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a') grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a') grib_api_inc = os.path.join(get_software_root('grib_api'), 'include') jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a') mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED')) # netCDF netcdf = get_software_root('netCDF') netcdf_fortran = get_software_root('netCDF-Fortran') if netcdf: netcdfinc = os.path.join(netcdf, 'include') if netcdf_fortran: netcdflib = os.path.join(netcdf_fortran, get_software_libdir('netCDF-Fortran'), 'libnetcdff.a') else: netcdflib = os.path.join(netcdf, get_software_libdir('netCDF'), 'libnetcdff.a') if not os.path.exists(netcdflib): raise EasyBuildError("%s does not exist", netcdflib) else: raise EasyBuildError("netCDF(-Fortran) not available") ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ') ] # LDFLAGS have form '-L/path/to' lapacklibs = [] for lib in os.getenv('LAPACK_STATIC_LIBS').split(','): libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths] lapacklibs.append([ libpath for libpath in libpaths if os.path.exists(libpath) ][0]) lapacklib = ' '.join(lapacklibs) blaslibs = [] for lib in os.getenv('BLAS_STATIC_LIBS').split(','): libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths] blaslibs.append([ libpath for libpath in libpaths if os.path.exists(libpath) ][0]) blaslib = ' '.join(blaslibs) qa = { 'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y', 'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich, 'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'), '%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex, '%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm, '%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib, '%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib, '%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib, '%sthe library netcdf%s' % (qpref, qsuff): netcdflib, '%sthe library lapack%s' % (qpref, qsuff): lapacklib, '%sthe library blas%s' % (qpref, qsuff): blaslib, '%sthe library mpi%s' % (qpref, qsuff): mpilib, '%sa MPI dummy library for serial executions, or ignore :' % qpref: '', 'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc, 'Please type the directory name where to find fortint.h or ignore :': '', 'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc, 'Do you want to define CANARI (y/n) [y] ?': 'y', 'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '', 'Please type the name of the script file used to recover local libraries (gget), or ignore :': '', 'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'), } f90_seq = os.getenv('F90_SEQ') if not f90_seq: # F90_SEQ is only defined when usempi is enabled f90_seq = os.getenv('F90') stdqa = OrderedDict([ (r'Confirm library .* is .*', 'y'), # this one needs to be tried first! 
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', f90_seq), (r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq), (r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''), (r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file), ]) no_qa = [ ".*ignored.", ] env.setvar('GMKTMP', self.builddir) env.setvar('GMKFILE', self.conf_file) run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa) # set environment variables for installation dirs env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack')) env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack')) env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack')) env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack')) # patch config file to include right Fortran compiler flags regex_subs = [(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % os.getenv('FFLAGS'))] apply_regex_substitutions(self.conf_filepath, regex_subs)
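# Illustration, not part of the easyblock: the FRTFLAGS patch above appends the
# toolchain's Fortran flags to the matching line of the generated gmkpack config file; a
# minimal re.sub sketch on a fabricated line (the flags value is hypothetical).
import re

line = "FRTFLAGS = -fpp"
fflags = "-O2 -fconvert=big-endian"  # stand-in for os.getenv('FFLAGS')
print(re.sub(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % fflags, line, flags=re.M))
# -> FRTFLAGS = -fpp -O2 -fconvert=big-endian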
def configure_step(self): """Custom configuration procedure for TensorFlow.""" binutils_root = get_software_root('binutils') if not binutils_root: raise EasyBuildError( "Failed to determine installation prefix for binutils") self.binutils_bin_path = os.path.join(binutils_root, 'bin') # filter out paths from CPATH and LIBRARY_PATH. This is needed since bazel will pull some dependencies that # might conflict with dependencies on the system and/or installed with EB. For example: protobuf path_filter = self.cfg['path_filter'] if path_filter: self.log.info( "Filtering $CPATH and $LIBRARY_PATH with path filter %s", path_filter) for var in ['CPATH', 'LIBRARY_PATH']: path = os.getenv(var).split(os.pathsep) self.log.info("$%s old value was %s" % (var, path)) # only retain paths that match none of the filter patterns filtered_path = os.pathsep.join( [p for p in path if not any(fil in p for fil in path_filter)]) env.setvar(var, filtered_path) self.setup_build_dirs() use_wrapper = False if self.toolchain.comp_family() == toolchain.INTELCOMP: # put wrappers for Intel C/C++ compilers in place (required to make sure license server is found) # cfr. https://github.com/bazelbuild/bazel/issues/663 for compiler in ('icc', 'icpc'): self.write_wrapper(self.wrapper_dir, compiler, 'NOT-USED-WITH-ICC') use_wrapper = True use_mpi = self.toolchain.options.get('usempi', False) mpi_home = '' if use_mpi: impi_root = get_software_root('impi') if impi_root: # put wrappers for Intel MPI compiler wrappers in place # (required to make sure license server and I_MPI_ROOT are found) for compiler in (os.getenv('MPICC'), os.getenv('MPICXX')): self.write_wrapper(self.wrapper_dir, compiler, os.getenv('I_MPI_ROOT')) use_wrapper = True # set correct value for MPI_HOME mpi_home = os.path.join(impi_root, 'intel64') else: self.log.debug("MPI module name: %s", self.toolchain.MPI_MODULE_NAME[0]) mpi_home = get_software_root(self.toolchain.MPI_MODULE_NAME[0]) self.log.debug("Derived value for MPI_HOME: %s", mpi_home) if use_wrapper: env.setvar('PATH', os.pathsep.join([self.wrapper_dir, os.getenv('PATH')])) self.prepare_python() self.handle_jemalloc() self.verify_system_libs_info() self.system_libs_info = self.get_system_libs() cuda_root = get_software_root('CUDA') cudnn_root = get_software_root('cuDNN') opencl_root = get_software_root('OpenCL') tensorrt_root = get_software_root('TensorRT') nccl_root = get_software_root('NCCL') config_env_vars = { 'CC_OPT_FLAGS': os.getenv('CXXFLAGS'), 'MPI_HOME': mpi_home, 'PYTHON_BIN_PATH': self.python_cmd, 'PYTHON_LIB_PATH': os.path.join(self.installdir, self.pylibdir), 'TF_CUDA_CLANG': '0', 'TF_ENABLE_XLA': ('0', '1')[bool(self.cfg['with_xla'])], # XLA JIT support 'TF_NEED_CUDA': ('0', '1')[bool(cuda_root)], 'TF_NEED_GCP': '0', # Google Cloud Platform 'TF_NEED_GDR': '0', 'TF_NEED_HDFS': '0', # Hadoop File System 'TF_NEED_JEMALLOC': ('0', '1')[self.cfg['with_jemalloc']], 'TF_NEED_MPI': ('0', '1')[bool(use_mpi)], 'TF_NEED_OPENCL': ('0', '1')[bool(opencl_root)], 'TF_NEED_OPENCL_SYCL': '0', 'TF_NEED_ROCM': '0', 'TF_NEED_S3': '0', # Amazon S3 File System 'TF_NEED_TENSORRT': '0', 'TF_NEED_VERBS': '0', 'TF_NEED_AWS': '0', # Amazon AWS Platform 'TF_NEED_KAFKA': '0', # Amazon Kafka Platform 'TF_SET_ANDROID_WORKSPACE': '0', 'TF_DOWNLOAD_CLANG': '0', # Still experimental in TF 2.1.0 'TF_SYSTEM_LIBS': ','.join(self.system_libs_info[0]), } if cuda_root: cuda_version = get_software_version('CUDA') cuda_maj_min_ver = '.'.join(cuda_version.split('.')[:2]) # $GCC_HOST_COMPILER_PATH should be set to path of the actual compiler (not the MPI compiler wrapper) if use_mpi: compiler_path 
= which(os.getenv('CC_SEQ')) else: compiler_path = which(os.getenv('CC')) # list of CUDA compute capabilities to use can be specified in two ways (where (2) overrules (1)): # (1) in the easyconfig file, via the custom cuda_compute_capabilities; # (2) in the EasyBuild configuration, via --cuda-compute-capabilities configuration option; ec_cuda_cc = self.cfg['cuda_compute_capabilities'] cfg_cuda_cc = build_option('cuda_compute_capabilities') cuda_cc = cfg_cuda_cc or ec_cuda_cc or [] if cfg_cuda_cc and ec_cuda_cc: warning_msg = "cuda_compute_capabilities specified in easyconfig (%s) are overruled by " % ec_cuda_cc warning_msg += "--cuda-compute-capabilities configuration option (%s)" % cfg_cuda_cc print_warning(warning_msg) elif not cuda_cc: warning_msg = "No CUDA compute capabilities specified, so using TensorFlow default " warning_msg += "(which may not be optimal for your system).\nYou should use " warning_msg += "the --cuda-compute-capabilities configuration option or the cuda_compute_capabilities " warning_msg += "easyconfig parameter to specify a list of CUDA compute capabilities to compile with." print_warning(warning_msg) # TensorFlow 1.12.1 requires compute capability >= 3.5 # see https://github.com/tensorflow/tensorflow/pull/25767 if LooseVersion(self.version) >= LooseVersion('1.12.1'): faulty_comp_caps = [ x for x in cuda_cc if LooseVersion(x) < LooseVersion('3.5') ] if faulty_comp_caps: error_msg = "TensorFlow >= 1.12.1 requires CUDA compute capabilities >= 3.5, " error_msg += "found one or more older ones: %s" raise EasyBuildError(error_msg, ', '.join(faulty_comp_caps)) if cuda_cc: self.log.info( "Compiling with specified list of CUDA compute capabilities: %s", ', '.join(cuda_cc)) config_env_vars.update({ 'CUDA_TOOLKIT_PATH': cuda_root, 'GCC_HOST_COMPILER_PATH': compiler_path, # This is the binutils bin folder: https://github.com/tensorflow/tensorflow/issues/39263 'GCC_HOST_COMPILER_PREFIX': self.binutils_bin_path, 'TF_CUDA_COMPUTE_CAPABILITIES': ','.join(cuda_cc), 'TF_CUDA_VERSION': cuda_maj_min_ver, }) # for recent TensorFlow versions, $TF_CUDA_PATHS and $TF_CUBLAS_VERSION must also be set if LooseVersion(self.version) >= LooseVersion('1.14'): # figure out correct major/minor version for CUBLAS from cublas_api.h cublas_api_header_glob_pattern = os.path.join( cuda_root, 'targets', '*', 'include', 'cublas_api.h') matches = glob.glob(cublas_api_header_glob_pattern) if len(matches) == 1: cublas_api_header_path = matches[0] cublas_api_header_txt = read_file(cublas_api_header_path) else: raise EasyBuildError( "Failed to isolate path to cublas_api.h: %s", matches) cublas_ver_parts = [] for key in [ 'CUBLAS_VER_MAJOR', 'CUBLAS_VER_MINOR', 'CUBLAS_VER_PATCH' ]: regex = re.compile("^#define %s ([0-9]+)" % key, re.M) res = regex.search(cublas_api_header_txt) if res: cublas_ver_parts.append(res.group(1)) else: raise EasyBuildError( "Failed to find pattern '%s' in %s", regex.pattern, cublas_api_header_path) config_env_vars.update({ 'TF_CUDA_PATHS': cuda_root, 'TF_CUBLAS_VERSION': '.'.join(cublas_ver_parts), }) if cudnn_root: cudnn_version = get_software_version('cuDNN') cudnn_maj_min_patch_ver = '.'.join( cudnn_version.split('.')[:3]) config_env_vars.update({ 'CUDNN_INSTALL_PATH': cudnn_root, 'TF_CUDNN_VERSION': cudnn_maj_min_patch_ver, }) else: raise EasyBuildError( "TensorFlow has a strict dependency on cuDNN if CUDA is enabled" ) if nccl_root: nccl_version = get_software_version('NCCL') # Ignore the PKG_REVISION identifier if it exists (i.e., report 2.4.6 for 2.4.6-1 or 2.4.6-2) 
nccl_version = nccl_version.split('-')[0] config_env_vars.update({ 'NCCL_INSTALL_PATH': nccl_root, }) else: nccl_version = '1.3' # Use simple downloadable version config_env_vars.update({ 'TF_NCCL_VERSION': nccl_version, }) if tensorrt_root: tensorrt_version = get_software_version('TensorRT') config_env_vars.update({ 'TF_NEED_TENSORRT': '1', 'TENSORRT_INSTALL_PATH': tensorrt_root, 'TF_TENSORRT_VERSION': tensorrt_version, }) for (key, val) in sorted(config_env_vars.items()): env.setvar(key, val) # Does no longer apply (and might not be required at all) since 1.12.0 if LooseVersion(self.version) < LooseVersion('1.12.0'): # patch configure.py (called by configure script) to avoid that Bazel abuses $HOME/.cache/bazel regex_subs = [(r"(run_shell\(\['bazel')", r"\1, '--output_base=%s', '--install_base=%s'" % (self.output_base_dir, self.install_base_dir))] apply_regex_substitutions('configure.py', regex_subs) # Tell Bazel to not use $HOME/.cache/bazel at all # See https://docs.bazel.build/versions/master/output_directories.html env.setvar('TEST_TMPDIR', self.output_root_dir) cmd = self.cfg['preconfigopts'] + './configure ' + self.cfg[ 'configopts'] run_cmd(cmd, log_all=True, simple=True)
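# Illustration, not part of the easyblock: the CUBLAS major/minor/patch version is scraped
# from '#define CUBLAS_VER_*' lines in cublas_api.h, as above; a self-contained sketch on
# a fabricated header fragment.
import re

cublas_api_header_txt = '\n'.join([
    '#define CUBLAS_VER_MAJOR 10',
    '#define CUBLAS_VER_MINOR 2',
    '#define CUBLAS_VER_PATCH 1',
])
cublas_ver_parts = []
for key in ['CUBLAS_VER_MAJOR', 'CUBLAS_VER_MINOR', 'CUBLAS_VER_PATCH']:
    res = re.search(r"^#define %s ([0-9]+)" % key, cublas_api_header_txt, re.M)
    cublas_ver_parts.append(res.group(1))
print('.'.join(cublas_ver_parts))  # -> 10.2.1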
def configure_step(self): """ Configure PETSc by setting configure options and running configure script. Configure procedure is much more concise for older versions (< v3). """ if LooseVersion(self.version) >= LooseVersion("3"): # make the install dir first if we are doing a download install, then keep it for the rest of the way deps = self.cfg["download_deps"] + self.cfg[ "download_deps_static"] + self.cfg["download_deps_shared"] if deps: self.log.info( "Creating the installation directory before the configure." ) self.make_installdir() self.cfg["keeppreviousinstall"] = True for dep in set(deps): self.cfg.update('configopts', '--download-%s=1' % dep) for dep in self.cfg["download_deps_static"]: self.cfg.update('configopts', '--download-%s-shared=0' % dep) for dep in self.cfg["download_deps_shared"]: self.cfg.update('configopts', '--download-%s-shared=1' % dep) # compilers self.cfg.update('configopts', '--with-cc="%s"' % os.getenv('CC')) self.cfg.update( 'configopts', '--with-cxx="%s" --with-c++-support' % os.getenv('CXX')) self.cfg.update('configopts', '--with-fc="%s"' % os.getenv('F90')) # compiler flags if LooseVersion(self.version) >= LooseVersion("3.5"): self.cfg.update('configopts', '--CFLAGS="%s"' % os.getenv('CFLAGS')) self.cfg.update('configopts', '--CXXFLAGS="%s"' % os.getenv('CXXFLAGS')) self.cfg.update('configopts', '--FFLAGS="%s"' % os.getenv('F90FLAGS')) else: self.cfg.update('configopts', '--with-cflags="%s"' % os.getenv('CFLAGS')) self.cfg.update('configopts', '--with-cxxflags="%s"' % os.getenv('CXXFLAGS')) self.cfg.update('configopts', '--with-fcflags="%s"' % os.getenv('F90FLAGS')) if not self.toolchain.comp_family( ) == toolchain.GCC: #@UndefinedVariable self.cfg.update('configopts', '--with-gnu-compilers=0') # MPI if self.toolchain.options.get('usempi', None): self.cfg.update('configopts', '--with-mpi=1') # build options self.cfg.update('configopts', '--with-build-step-np=%s' % self.cfg['parallel']) self.cfg.update( 'configopts', '--with-shared-libraries=%d' % self.cfg['shared_libs']) self.cfg.update( 'configopts', '--with-debugging=%d' % self.toolchain.options['debug']) self.cfg.update('configopts', '--with-pic=%d' % self.toolchain.options['pic']) self.cfg.update('configopts', '--with-x=0 --with-windows-graphics=0') # PAPI support if self.cfg['with_papi']: papi_inc = self.cfg['papi_inc'] papi_inc_file = os.path.join(papi_inc, "papi.h") papi_lib = self.cfg['papi_lib'] if os.path.isfile(papi_inc_file) and os.path.isfile(papi_lib): self.cfg.update('configopts', '--with-papi=1') self.cfg.update('configopts', '--with-papi-include=%s' % papi_inc) self.cfg.update('configopts', '--with-papi-lib=%s' % papi_lib) else: raise EasyBuildError( "PAPI header (%s) and/or lib (%s) not found, can not enable PAPI support?", papi_inc_file, papi_lib) # Python extensions_step if get_software_root('Python'): self.cfg.update('configopts', '--with-numpy=1') if self.cfg['shared_libs']: self.cfg.update('configopts', '--with-mpi4py=1') # FFTW, ScaLAPACK (and BLACS for older PETSc versions) deps = ["FFTW", "ScaLAPACK"] if LooseVersion(self.version) < LooseVersion("3.5"): deps.append("BLACS") for dep in deps: inc = os.getenv('%s_INC_DIR' % dep.upper()) libdir = os.getenv('%s_LIB_DIR' % dep.upper()) libs = os.getenv('%s_STATIC_LIBS' % dep.upper()) if inc and libdir and libs: with_arg = "--with-%s" % dep.lower() self.cfg.update('configopts', '%s=1' % with_arg) self.cfg.update('configopts', '%s-include=%s' % (with_arg, inc)) self.cfg.update( 'configopts', '%s-lib=[%s/%s]' % (with_arg, libdir, libs)) 
            else:
                self.log.info("Missing inc/lib info, so not enabling %s support." % dep)

        # BLAS, LAPACK libraries
        bl_libdir = os.getenv('BLAS_LAPACK_LIB_DIR')
        bl_libs = os.getenv('BLAS_LAPACK_STATIC_LIBS')
        if bl_libdir and bl_libs:
            self.cfg.update('configopts', '--with-blas-lapack-lib=[%s/%s]' % (bl_libdir, bl_libs))
        else:
            raise EasyBuildError("One or more environment variables for BLAS/LAPACK not defined")

        # additional dependencies
        # filter out deps that are handled separately
        depfilter = self.cfg.builddependencies() + ["BLACS", "BLAS", "CMake", "FFTW", "LAPACK", "numpy",
                                                    "mpi4py", "papi", "ScaLAPACK", "SuiteSparse"]
        deps = [dep['name'] for dep in self.cfg.dependencies() if dep['name'] not in depfilter]
        for dep in deps:
            if isinstance(dep, str):
                dep = (dep, dep)
            deproot = get_software_root(dep[0])
            if deproot:
                if (LooseVersion(self.version) >= LooseVersion("3.5")) and (dep[1] == "SCOTCH"):
                    # --with-ptscotch is the configopt for PETSc >= 3.5
                    withdep = "--with-pt%s" % dep[1].lower()
                else:
                    withdep = "--with-%s" % dep[1].lower()
                self.cfg.update('configopts', '%s=1 %s-dir=%s' % (withdep, withdep, deproot))

        # SuiteSparse options changed in PETSc 3.5
        suitesparse = get_software_root('SuiteSparse')
        if suitesparse:
            if LooseVersion(self.version) >= LooseVersion("3.5"):
                withdep = "--with-suitesparse"
                # specified order of libs matters!
                ss_libs = ["UMFPACK", "KLU", "CHOLMOD", "BTF", "CCOLAMD", "COLAMD", "CAMD", "AMD"]

                suitesparse_inc = [os.path.join(suitesparse, lib, "Include") for lib in ss_libs]
                suitesparse_inc.append(os.path.join(suitesparse, "SuiteSparse_config"))
                inc_spec = "-include=[%s]" % ','.join(suitesparse_inc)

                suitesparse_libs = [os.path.join(suitesparse, lib, "Lib", "lib%s.a" % lib.lower())
                                    for lib in ss_libs]
                suitesparse_libs.append(os.path.join(suitesparse, "SuiteSparse_config", "libsuitesparseconfig.a"))
                lib_spec = "-lib=[%s]" % ','.join(suitesparse_libs)
            else:
                # CHOLMOD and UMFPACK are part of SuiteSparse (PETSc < 3.5)
                withdep = "--with-umfpack"
                inc_spec = "-include=%s" % os.path.join(suitesparse, "UMFPACK", "Include")
                # specified order of libs matters!
                umfpack_libs = [os.path.join(suitesparse, lib, "Lib", "lib%s.a" % lib.lower())
                                for lib in ["UMFPACK", "CHOLMOD", "COLAMD", "AMD"]]
                lib_spec = "-lib=[%s]" % ','.join(umfpack_libs)

            self.cfg.update('configopts', ' '.join([withdep + spec for spec in ['=1', inc_spec, lib_spec]]))

        # set PETSC_DIR for configure (env) and build_step
        env.setvar('PETSC_DIR', self.cfg['start_dir'])
        self.cfg.update('buildopts', 'PETSC_DIR=%s' % self.cfg['start_dir'])

        if self.cfg['sourceinstall']:
            # run configure without --prefix (required)
            cmd = "%s ./configure %s" % (self.cfg['preconfigopts'], self.cfg['configopts'])
            (out, _) = run_cmd(cmd, log_all=True, simple=False)
        else:
            out = super(EB_PETSc, self).configure_step()

        # check for errors in configure
        error_regexp = re.compile("ERROR")
        if error_regexp.search(out):
            raise EasyBuildError("Error(s) detected in configure output!")

        if self.cfg['sourceinstall']:
            # figure out PETSC_ARCH setting
            petsc_arch_regex = re.compile(r"^\s*PETSC_ARCH:\s*(\S+)$", re.M)
            res = petsc_arch_regex.search(out)
            if res:
                self.petsc_arch = res.group(1)
                self.cfg.update('buildopts', 'PETSC_ARCH=%s' % self.petsc_arch)
            else:
                raise EasyBuildError("Failed to determine PETSC_ARCH setting.")

            self.petsc_subdir = '%s-%s' % (self.name.lower(), self.version)

    else:  # old versions (< 3.x)
        self.cfg.update('configopts', '--prefix=%s' % self.installdir)
        self.cfg.update('configopts', '--with-shared=1')

        # additional dependencies
        for dep in ["SCOTCH"]:
            deproot = get_software_root(dep)
            if deproot:
                withdep = "--with-%s" % dep.lower()
                self.cfg.update('configopts', '%s=1 %s-dir=%s' % (withdep, withdep, deproot))

        cmd = "./config/configure.py %s" % self.cfg['configopts']
        run_cmd(cmd, log_all=True, simple=True)

    # make for PETSc >= 3.5 does not accept -j
    if LooseVersion(self.version) >= LooseVersion("3.5"):
        self.cfg['parallel'] = None
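# For reference, the options accumulated above all end up in a single configure
# invocation; a hypothetical, heavily abridged example of what that can look
# like (all paths and flags below are made up for illustration):
#
#     ./configure --with-cc="mpicc" --with-cxx="mpicxx" --with-c++-support \
#         --with-fc="mpif90" --CFLAGS="-O2" --with-mpi=1 --with-shared-libraries=1 \
#         --with-blas-lapack-lib=[/path/to/lapack/lib/liblapack.a] \
#         --with-ptscotch=1 --with-ptscotch-dir=/path/to/scotch
#
# PETSc's configure parses the [...] notation itself, which is why library
# lists are passed as comma-separated paths inside square brackets.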
def sanity_check_step(self):
    """Custom sanity check for TensorFlow."""
    custom_paths = {
        'files': ['bin/tensorboard'],
        'dirs': [self.pylibdir],
    }

    custom_commands = [
        "%s -c 'import tensorflow'" % self.python_cmd,
        # tf_should_use imports weakref.finalize, which requires backports.weakref for Python < 3.4
        "%s -c 'from tensorflow.python.util import tf_should_use'" % self.python_cmd,
    ]
    res = super(EB_TensorFlow, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)

    # test installation using MNIST tutorial examples
    if self.cfg['runtest']:
        pythonpath = os.getenv('PYTHONPATH', '')
        env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, self.pylibdir), pythonpath]))

        mnist_pys = []

        if LooseVersion(self.version) < LooseVersion('2.0'):
            mnist_pys.append('mnist_with_summaries.py')

        if LooseVersion(self.version) < LooseVersion('1.13'):
            # mnist_softmax.py was removed in TensorFlow 1.13.x
            mnist_pys.append('mnist_softmax.py')

        for mnist_py in mnist_pys:
            datadir = tempfile.mkdtemp(suffix='-tf-%s-data' % os.path.splitext(mnist_py)[0])
            logdir = tempfile.mkdtemp(suffix='-tf-%s-logs' % os.path.splitext(mnist_py)[0])
            mnist_py = os.path.join(self.start_dir, 'tensorflow', 'examples', 'tutorials', 'mnist', mnist_py)
            cmd = "%s %s --data_dir %s --log_dir %s" % (self.python_cmd, mnist_py, datadir, logdir)
            run_cmd(cmd, log_all=True, simple=True, log_ok=True)

    # run test script (if any)
    if self.test_script:
        # copy test script to build dir before running it, to avoid that a file named 'tensorflow.py'
        # (a customized TensorFlow easyblock for example) breaks 'import tensorflow'
        test_script = os.path.join(self.builddir, os.path.basename(self.test_script))
        copy_file(self.test_script, test_script)

        run_cmd("python %s" % test_script, log_all=True, simple=True, log_ok=True)

    return res
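# The $PYTHONPATH manipulation above is what lets 'import tensorflow' resolve
# to the freshly installed (but not yet module-loaded) installation; a minimal
# stand-alone equivalent (the paths are made up for illustration):
#
#     import os
#     pylib = os.path.join('/path/to/installdir', 'lib/python3.6/site-packages')
#     os.environ['PYTHONPATH'] = os.pathsep.join([pylib, os.getenv('PYTHONPATH', '')])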
def configure_step(self):
    """Configure build:
    - set required environment variables (for netCDF, JasPer)
    - patch compile script and ungrib Makefile for non-default install paths of WRF and JasPer
    - run configure script and figure out how to select the desired build option
    - patch configure.wps file afterwards to fix 'serial compiler' setting
    """
    # netCDF dependency check + setting env vars (NETCDF, NETCDFF)
    set_netcdf_env_vars(self.log)

    # WRF dependency check
    wrf = get_software_root('WRF')
    if wrf:
        wrfdir = os.path.join(wrf, det_wrf_subdir(get_software_version('WRF')))
    else:
        raise EasyBuildError("WRF module not loaded?")

    self.compile_script = 'compile'

    if LooseVersion(self.version) >= LooseVersion('4.0.3'):
        # specify install location of WRF via $WRF_DIR (supported since WPS 4.0.3)
        # see https://github.com/wrf-model/WPS/pull/102
        env.setvar('WRF_DIR', wrfdir)
    else:
        # patch compile script so that WRF is found
        regex_subs = [(r"^(\s*set\s*WRF_DIR_PRE\s*=\s*)\${DEV_TOP}(.*)$", r"\1%s\2" % wrfdir)]
        apply_regex_substitutions(self.compile_script, regex_subs)

    # libpng dependency check
    libpng = get_software_root('libpng')
    zlib = get_software_root('zlib')
    if libpng:
        paths = [libpng]
        if zlib:
            paths.insert(0, zlib)
        libpnginc = ' '.join(['-I%s' % os.path.join(path, 'include') for path in paths])
        libpnglib = ' '.join(['-L%s' % os.path.join(path, 'lib') for path in paths])
    else:
        # define these as empty, assume that libpng will be available via OS (e.g. due to --filter-deps=libpng)
        libpnglib = ""
        libpnginc = ""

    # JasPer dependency check + setting env vars
    jasper = get_software_root('JasPer')
    if jasper:
        env.setvar('JASPERINC', os.path.join(jasper, "include"))
        jasperlibdir = os.path.join(jasper, "lib")
        env.setvar('JASPERLIB', jasperlibdir)
        jasperlib = "-L%s" % jasperlibdir
    else:
        raise EasyBuildError("JasPer module not loaded?")

    # patch ungrib Makefile so that JasPer is found
    jasperlibs = "%s -ljasper %s -lpng" % (jasperlib, libpnglib)
    regex_subs = [
        (r"^(\s*-L\.\s*-l\$\(LIBTARGET\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
        (r"^(\s*\$\(COMPRESSION_LIBS\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
    ]
    apply_regex_substitutions(os.path.join('ungrib', 'src', 'Makefile'), regex_subs)

    # patch arch/Config.pl script, so that run_cmd_qa receives all output to answer questions
    patch_perl_script_autoflush(os.path.join("arch", "Config.pl"))

    # fix hardcoded cpp paths
    regex_subs = [('/usr/bin/cpp', 'cpp')]
    apply_regex_substitutions('arch/configure.defaults', regex_subs)

    # configure

    # determine build type option to look for
    self.comp_fam = self.toolchain.comp_family()
    build_type_option = None

    if LooseVersion(self.version) >= LooseVersion("3.4"):
        knownbuildtypes = {'smpar': 'serial', 'dmpar': 'dmpar'}
        if self.comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            build_type_option = " Linux x86_64, Intel compiler"
        elif self.comp_fam == toolchain.GCC:  # @UndefinedVariable
            if LooseVersion(self.version) >= LooseVersion("3.6"):
                build_type_option = "Linux x86_64, gfortran"
            else:
                build_type_option = "Linux x86_64 g95"
        else:
            raise EasyBuildError("Don't know how to figure out build type to select.")
    else:
        knownbuildtypes = {'smpar': 'serial', 'dmpar': 'DM parallel'}
        if self.comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            build_type_option = "PC Linux x86_64, Intel compiler"
        elif self.comp_fam == toolchain.GCC:  # @UndefinedVariable
            build_type_option = "PC Linux x86_64, gfortran compiler,"
            knownbuildtypes['dmpar'] = knownbuildtypes['dmpar'].upper()
        else:
            raise EasyBuildError("Don't know how to figure out build type to select.")

    # check and fetch selected build type
    bt = self.cfg['buildtype']

    if bt not in knownbuildtypes:
        raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, knownbuildtypes.keys())

    # fetch option number based on build type option and selected build type
    build_type_question = r"\s*(?P<nr>[0-9]+).\s*%s\s*\(?%s\)?\s*\n" % (build_type_option, knownbuildtypes[bt])

    cmd = "./configure"
    qa = {}
    no_qa = [".*compiler is.*"]
    std_qa = {
        # named group in match will be used to construct answer
        r"%s(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
    }

    run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    # make sure correct compilers and compiler flags are being used
    comps = {
        'SCC': "%s -I$(JASPERINC) %s" % (os.getenv('CC'), libpnginc),
        'SFC': os.getenv('F90'),
        'DM_FC': os.getenv('MPIF90'),
        'DM_CC': os.getenv('MPICC'),
        'FC': os.getenv('MPIF90'),
        'CC': os.getenv('MPICC'),
    }
    if self.toolchain.options.get('openmp', None):
        comps.update({'LDFLAGS': '%s %s' % (self.toolchain.get_flag('openmp'), os.environ['LDFLAGS'])})

    regex_subs = [(r"^(%s\s*=\s*).*$" % key, r"\1 %s" % val) for (key, val) in comps.items()]
    apply_regex_substitutions('configure.wps', regex_subs)
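# The std_qa pattern above answers the interactive configure menu by number.
# As an illustration, for a hypothetical menu excerpt like
#
#     3.  Linux x86_64, gfortran    (serial)
#     4.  Linux x86_64, gfortran    (dmpar)
#     Enter selection [1-40] :
#
# with build_type_option "Linux x86_64, gfortran" and buildtype 'dmpar', the
# named group (?P<nr>[0-9]+) captures '4', and run_cmd_qa expands the answer
# template "%(nr)s" to "4" before feeding it back to the configure script.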
def configure_step(self):
    """Custom configuration procedure for ESMF through environment variables."""
    env.setvar('ESMF_DIR', self.cfg['start_dir'])
    env.setvar('ESMF_INSTALL_PREFIX', self.installdir)
    env.setvar('ESMF_INSTALL_BINDIR', 'bin')
    env.setvar('ESMF_INSTALL_LIBDIR', 'lib')
    env.setvar('ESMF_INSTALL_MODDIR', 'mod')

    # specify compiler
    comp_family = self.toolchain.comp_family()
    if comp_family in [toolchain.GCC]:
        compiler = 'gfortran'
    else:
        compiler = comp_family.lower()
    env.setvar('ESMF_COMPILER', compiler)

    # specify MPI communications library
    if self.cfg.get('mpicomm', None):
        comm = self.cfg['mpicomm']
    else:
        mpi_family = self.toolchain.mpi_family()
        if mpi_family in [toolchain.MPICH, toolchain.QLOGICMPI]:
            # MPICH family for MPICH v3.x, which is MPICH2 compatible
            comm = 'mpich2'
        else:
            comm = mpi_family.lower()
    env.setvar('ESMF_COMM', comm)

    # specify decent LAPACK lib
    env.setvar('ESMF_LAPACK', 'user')
    env.setvar('ESMF_LAPACK_LIBS', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBLAPACK_MT')))

    # specify netCDF
    netcdf = get_software_root('netCDF')
    if netcdf:
        env.setvar('ESMF_NETCDF', 'user')
        netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']

        # Fortran
        netcdff = get_software_root('netCDF-Fortran')
        if netcdff:
            netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
        else:
            netcdf_libs.append('-lnetcdff')

        # C++
        netcdfcxx = get_software_root('netCDF-C++')
        if netcdfcxx:
            netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++4"]
        else:
            netcdf_libs.append('-lnetcdf_c++4')
        env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))

    # 'make info' provides useful debug info
    cmd = "make info"
    run_cmd(cmd, log_all=True, simple=True, log_ok=True)
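# ESMF's makefile-based build is driven entirely by $ESMF_* environment
# variables rather than a configure script; the shell equivalent of the
# configuration above is roughly (values are illustrative, not prescriptive):
#
#     export ESMF_DIR=$PWD ESMF_INSTALL_PREFIX=/path/to/installdir
#     export ESMF_COMPILER=gfortran ESMF_COMM=mpich2
#     make info    # prints the resolved configuration, handy for debugging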
def test_step(self):
    """Run WIEN2k test benchmarks."""

    def run_wien2k_test(cmd_arg):
        """Run a WIEN2k test command, and check for success."""
        cmd = "x_lapw lapw1 %s" % cmd_arg
        (out, _) = run_cmd(cmd, log_all=True, simple=False)

        re_success = re.compile(r"LAPW1\s+END")
        if not re_success.search(out):
            raise EasyBuildError("Test '%s' in %s failed (pattern '%s' not found)?",
                                 cmd, os.getcwd(), re_success.pattern)
        else:
            self.log.info("Test '%s' seems to have run successfully: %s" % (cmd, out))

    if self.cfg['runtest']:
        if not self.cfg['testdata']:
            raise EasyBuildError("List of URLs for testdata not provided.")

        # prepend install directory to $PATH, define $SCRATCH which is used by the tests
        env.setvar('PATH', "%s:%s" % (self.installdir, os.environ['PATH']))

        try:
            cwd = os.getcwd()

            # create temporary directory
            tmpdir = tempfile.mkdtemp()
            os.chdir(tmpdir)
            self.log.info("Running tests in %s" % tmpdir)

            scratch = os.path.join(tmpdir, 'scratch')
            mkdir(scratch)
            env.setvar('SCRATCH', scratch)

            # download data
            testdata_paths = {}
            for testdata in self.cfg['testdata']:
                td_path = self.obtain_file(testdata)
                if not td_path:
                    raise EasyBuildError("Downloading file from %s failed?", testdata)
                testdata_paths.update({os.path.basename(testdata): td_path})

            self.log.debug('testdata_paths: %s' % testdata_paths)

            # unpack serial benchmark
            serial_test_name = "test_case"
            extract_file(testdata_paths['%s.tar.gz' % serial_test_name], tmpdir)

            # run serial benchmark
            os.chdir(os.path.join(tmpdir, serial_test_name))
            run_wien2k_test("-c")

            # unpack parallel benchmark (in serial benchmark dir)
            parallel_test_name = "mpi-benchmark"
            extract_file(testdata_paths['%s.tar.gz' % parallel_test_name], tmpdir)

            # run parallel benchmark
            os.chdir(os.path.join(tmpdir, serial_test_name))
            run_wien2k_test("-p")

            os.chdir(cwd)
            rmtree2(tmpdir)
        except OSError as err:
            raise EasyBuildError("Failed to run WIEN2k benchmark tests: %s", err)

        self.log.debug("Current dir: %s" % os.getcwd())
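# run_wien2k_test() treats a test as passed only if 'LAPW1 END' shows up in the
# captured command output; a minimal sketch of that success check in isolation
# (the output string is made up for illustration):
#
#     import re
#     out = "... LAPW1  END ..."          # captured command output
#     if not re.search(r"LAPW1\s+END", out):
#         raise RuntimeError("test failed")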
def install_step(self):
    """Install components, if specified."""
    comp_cnt = len(self.cfg['components'])
    for idx, cfg in enumerate(self.comp_cfgs):

        print_msg("installing bundle component %s v%s (%d/%d)..." %
                  (cfg['name'], cfg['version'], idx + 1, comp_cnt))
        self.log.info("Installing component %s v%s using easyblock %s", cfg['name'], cfg['version'], cfg.easyblock)

        comp = cfg.easyblock(cfg)

        # correct build/install dirs
        comp.builddir = self.builddir
        comp.install_subdir, comp.installdir = self.install_subdir, self.installdir

        # make sure we can build in parallel
        comp.set_parallel()

        # figure out correct start directory
        comp.guess_start_dir()

        # need to run fetch_patches to ensure per-component patches are applied
        comp.fetch_patches()

        comp.src = []

        # find matching entries in self.src for this component
        for source in comp.cfg['sources']:
            if isinstance(source, string_type):
                comp_src_fn = source
            elif isinstance(source, dict):
                if 'filename' in source:
                    comp_src_fn = source['filename']
                else:
                    raise EasyBuildError("Encountered source file specified as dict without 'filename': %s", source)
            else:
                raise EasyBuildError("Specification of unknown type for source file: %s", source)

            found = False
            for src in self.src:
                if src['name'] == comp_src_fn:
                    self.log.info("Found spec for source %s for component %s: %s", comp_src_fn, comp.name, src)
                    comp.src.append(src)
                    found = True
                    break
            if not found:
                raise EasyBuildError("Failed to find spec for source %s for component %s", comp_src_fn, comp.name)

            # location of first unpacked source is used to determine where to apply patch(es)
            comp.src[-1]['finalpath'] = comp.cfg['start_dir']

        # check if sanity checks are enabled for the component
        if self.cfg['sanity_check_all_components'] or comp.cfg['name'] in self.cfg['sanity_check_components']:
            self.comp_cfgs_sanity_check.append(comp)

        # run relevant steps
        for step_name in ['patch', 'configure', 'build', 'install']:
            if step_name in cfg['skipsteps']:
                comp.log.info("Skipping '%s' step for component %s v%s", step_name, cfg['name'], cfg['version'])
            else:
                comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])

        # update environment to ensure that anything provided by earlier components can be picked up
        # by later components; once the installation is finalised, this is handled by the generated module
        reqs = comp.make_module_req_guess()
        for envvar in reqs:
            curr_val = os.getenv(envvar, '')
            curr_paths = curr_val.split(os.pathsep)
            for subdir in reqs[envvar]:
                path = os.path.join(self.installdir, subdir)
                if path not in curr_paths:
                    if curr_val:
                        new_val = '%s:%s' % (path, curr_val)
                    else:
                        new_val = path
                    env.setvar(envvar, new_val)

        # close log for this component
        comp.close_log()
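# The environment update at the end mimics what the generated module file will
# do later: prepend this installation's subdirectories to path-like variables.
# A stripped-down sketch of that prepend logic (names are illustrative):
#
#     reqs = {'PATH': ['bin'], 'LD_LIBRARY_PATH': ['lib', 'lib64']}
#     for envvar, subdirs in reqs.items():
#         curr = os.getenv(envvar, '')
#         for subdir in subdirs:
#             path = os.path.join(installdir, subdir)
#             if path not in curr.split(os.pathsep):
#                 curr = path + os.pathsep + curr if curr else path
#         os.environ[envvar] = curr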
def configure_step(self):
    """Custom configuration procedure for MVAPICH2."""
    # things might go wrong if a previous install dir is present, so let's get rid of it
    if not self.cfg['keeppreviousinstall']:
        self.log.info("Making sure any old installation is removed before we start the build...")
        super(EB_MVAPICH2, self).make_dir(self.installdir, True, dontcreateinstalldir=True)

    # additional configuration options
    add_configopts = []
    add_configopts.append('--with-rdma=%s' % self.cfg['rdma_type'])

    # use POSIX threads
    add_configopts.append('--with-thread-package=pthreads')

    if self.cfg['debug']:
        # debug build, with error checking, timing and debug info
        # note: this will affect performance
        add_configopts.append('--enable-fast=none')
    else:
        # optimized build, no error checking, timing or debug info
        add_configopts.append('--enable-fast')

    # enable shared libraries, using GCC and GNU ld options
    add_configopts.extend(['--enable-shared', '--enable-sharedlibs=gcc'])

    # enable Fortran 77/90 and C++ bindings
    add_configopts.extend(['--enable-f77', '--enable-fc', '--enable-cxx'])

    # the MVAPICH configure script complains when F90 or F90FLAGS are set,
    # they should be replaced with FC/FCFLAGS instead
    for (envvar, new_envvar) in [("F90", "FC"), ("F90FLAGS", "FCFLAGS")]:
        envvar_val = os.getenv(envvar)
        if envvar_val:
            new_envvar_val = os.getenv(new_envvar)
            env.setvar(envvar, '')
            if envvar_val == new_envvar_val:
                self.log.debug("$%s == $%s, just defined $%s as empty", envvar, new_envvar, envvar)
            elif new_envvar_val is None:
                env.setvar(new_envvar, envvar_val)
            else:
                raise EasyBuildError("Both $%s and $%s set, can I overwrite $%s with $%s (%s) ?",
                                     envvar, new_envvar, new_envvar, envvar, envvar_val)

    # enable specific support options (if desired)
    if self.cfg['withmpe']:
        add_configopts.append('--enable-mpe')
    if self.cfg['withlimic2']:
        add_configopts.append('--enable-limic2')
    if self.cfg['withchkpt']:
        add_configopts.extend(['--enable-checkpointing', '--with-hydra-ckpointlib=blcr'])
    if self.cfg['withhwloc']:
        add_configopts.append('--with-hwloc')

    # pass BLCR paths if specified
    if self.cfg['blcr_path']:
        add_configopts.append('--with-blcr=%s' % self.cfg['blcr_path'])
    if self.cfg['blcr_inc_path']:
        add_configopts.append('--with-blcr-include=%s' % self.cfg['blcr_inc_path'])
    if self.cfg['blcr_lib_path']:
        add_configopts.append('--with-blcr-libpath=%s' % self.cfg['blcr_lib_path'])

    self.cfg.update('configopts', ' '.join(add_configopts))

    super(EB_MVAPICH2, self).configure_step()
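# A minimal stand-alone sketch of the $F90 -> $FC remapping rule implemented
# above (values are illustrative; env.setvar roughly corresponds to an
# os.environ assignment plus logging):
#
#     os.environ['F90'] = 'gfortran'       # set by the toolchain
#     f90, fc = os.environ.get('F90'), os.environ.get('FC')
#     if f90:
#         os.environ['F90'] = ''           # configure rejects a non-empty $F90
#         if fc is None:
#             os.environ['FC'] = f90       # migrate the value to $FC
#         elif fc != f90:
#             raise RuntimeError("conflicting $F90/$FC")  # refuse to guess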
def configure_step(self):
    """Custom configuration procedure for GROMACS: set configure options for configure or cmake."""
    if LooseVersion(self.version) >= LooseVersion('4.6'):
        cuda = get_software_root('CUDA')
        if cuda:
            # CUDA with double precision is currently not supported in GROMACS;
            # if the easyconfig explicitly has double_precision=True, error out,
            # otherwise warn about it and skip the double precision build
            if self.cfg.get('double_precision'):
                raise EasyBuildError("Double precision is not available for GPU build. "
                                     "Please explicitly set \"double_precision = False\" "
                                     "or remove it in the easyconfig file.")
            if self.double_prec_pattern in self.cfg['configopts']:
                if self.cfg.get('double_precision') is None:
                    # only print this warning once, when trying the double precision build the first time
                    self.cfg['double_precision'] = False
                    self.log.info("Double precision is not available for GPU build. "
                                  "Skipping the double precision build.")

                self.log.info("skipping configure step")
                return

            if LooseVersion(self.version) >= LooseVersion('2021'):
                self.cfg.update('configopts', "-DGMX_GPU=CUDA -DCUDA_TOOLKIT_ROOT_DIR=%s" % cuda)
            else:
                self.cfg.update('configopts', "-DGMX_GPU=ON -DCUDA_TOOLKIT_ROOT_DIR=%s" % cuda)
        else:
            # explicitly disable GPU support if CUDA is not available,
            # to avoid that GROMACS finds and uses a system-wide CUDA compiler
            self.cfg.update('configopts', "-DGMX_GPU=OFF")

    # check whether PLUMED is loaded as a dependency
    plumed_root = get_software_root('PLUMED')
    if plumed_root:
        # need to check whether PLUMED has an engine for this version
        engine = 'gromacs-%s' % self.version

        (out, _) = run_cmd("plumed-patch -l", log_all=True, simple=False)
        if not re.search(engine, out):
            raise EasyBuildError("There is no support in PLUMED version %s for GROMACS %s: %s",
                                 get_software_version('PLUMED'), self.version, out)

        # PLUMED patching must be done at different stages depending on the version of GROMACS;
        # just prepare the first part of the command here
        plumed_cmd = "plumed-patch -p -e %s" % engine

    if LooseVersion(self.version) < LooseVersion('4.6'):
        self.log.info("Using configure script for configuring GROMACS build.")

        if self.cfg['build_shared_libs']:
            self.cfg.update('configopts', "--enable-shared --disable-static")
        else:
            self.cfg.update('configopts', "--enable-static")

        # use external BLAS and LAPACK
        self.cfg.update('configopts', "--with-external-blas --with-external-lapack")
        env.setvar('LIBS', "%s %s" % (os.environ['LIBLAPACK'], os.environ['LIBS']))

        # don't use the X window system
        self.cfg.update('configopts', "--without-x")

        # OpenMP is not supported for versions older than 4.5.
        if LooseVersion(self.version) >= LooseVersion('4.5'):
            # enable OpenMP support if desired
            if self.toolchain.options.get('openmp', None):
                self.cfg.update('configopts', "--enable-threads")
            else:
                self.cfg.update('configopts', "--disable-threads")
        elif self.toolchain.options.get('openmp', None):
            raise EasyBuildError("GROMACS version %s does not support OpenMP" % self.version)

        # GSL support
        if get_software_root('GSL'):
            self.cfg.update('configopts', "--with-gsl")
        else:
            self.cfg.update('configopts', "--without-gsl")

        # actually run configure via ancestor (not direct parent)
        self.cfg['configure_cmd'] = "./configure"
        ConfigureMake.configure_step(self)

        # now patch GROMACS for PLUMED, between configure and build
        if plumed_root:
            run_cmd(plumed_cmd, log_all=True, simple=True)
    else:
        if '-DGMX_MPI=ON' in self.cfg['configopts']:
            mpi_numprocs = self.cfg.get('mpi_numprocs', 0)
            if mpi_numprocs == 0:
                self.log.info("No number of test MPI tasks specified -- using default: %s", self.cfg['parallel'])
                mpi_numprocs = self.cfg['parallel']
            elif mpi_numprocs > self.cfg['parallel']:
                self.log.warning("Number of test MPI tasks (%s) is greater than value for 'parallel': %s",
                                 mpi_numprocs, self.cfg['parallel'])

            mpiexec = self.cfg.get('mpiexec')
            if mpiexec:
                mpiexec_path = which(mpiexec)
                if mpiexec_path:
                    self.cfg.update('configopts', "-DMPIEXEC=%s" % mpiexec_path)
                    self.cfg.update('configopts', "-DMPIEXEC_NUMPROC_FLAG=%s" % self.cfg.get('mpiexec_numproc_flag'))
                    self.cfg.update('configopts', "-DNUMPROC=%s" % mpi_numprocs)
                elif self.cfg['runtest']:
                    raise EasyBuildError("'%s' not found in $PATH", mpiexec)
            else:
                raise EasyBuildError("No value found for 'mpiexec'")
            self.log.info("Using %s as MPI executable when testing, with numprocs flag '%s' and %s tasks",
                          mpiexec_path, self.cfg.get('mpiexec_numproc_flag'), mpi_numprocs)

        if LooseVersion(self.version) >= LooseVersion('2019'):
            # building the gmxapi interface requires shared libraries;
            # this is handled in the class initialisation so --module-only works
            self.cfg.update('configopts', "-DGMXAPI=ON")

        if LooseVersion(self.version) >= LooseVersion('2020'):
            # build Python bindings if Python is loaded as a dependency
            python_root = get_software_root('Python')
            if python_root:
                bin_python = os.path.join(python_root, 'bin', 'python')
                self.cfg.update('configopts', "-DPYTHON_EXECUTABLE=%s" % bin_python)
                self.cfg.update('configopts', "-DGMX_PYTHON_PACKAGE=ON")

        # now patch GROMACS for PLUMED, before running cmake
        if plumed_root:
            if LooseVersion(self.version) >= LooseVersion('5.1'):
                # use shared or static patch depending on the setting of self.cfg['build_shared_libs'],
                # and adapt cmake flags accordingly, as per instructions from "plumed patch -i"
                if self.cfg['build_shared_libs']:
                    mode = 'shared'
                else:
                    mode = 'static'
                plumed_cmd = plumed_cmd + ' -m %s' % mode

            run_cmd(plumed_cmd, log_all=True, simple=True)

        # prefer static libraries, if available
        if self.cfg['build_shared_libs']:
            self.cfg.update('configopts', "-DGMX_PREFER_STATIC_LIBS=OFF")
        else:
            self.cfg.update('configopts', "-DGMX_PREFER_STATIC_LIBS=ON")

        # always specify to use external BLAS/LAPACK
        self.cfg.update('configopts', "-DGMX_EXTERNAL_BLAS=ON -DGMX_EXTERNAL_LAPACK=ON")

        # disable GUI tools
        self.cfg.update('configopts', "-DGMX_X11=OFF")

        # convince cmake to build for an older architecture than present on the build node by setting
        # the GMX_SIMD CMake flag; this does not make sense for Cray, because OPTARCH is defined by
        # the Cray toolchain
        if self.toolchain.toolchain_family() != toolchain.CRAYPE:
            gmx_simd = self.get_gromacs_arch()
            if gmx_simd:
                if LooseVersion(self.version) < LooseVersion('5.0'):
                    self.cfg.update('configopts', "-DGMX_CPU_ACCELERATION=%s" % gmx_simd)
                else:
                    self.cfg.update('configopts', "-DGMX_SIMD=%s" % gmx_simd)

        # set regression test path
        prefix = 'regressiontests'
        if any([src['name'].startswith(prefix) for src in self.src]):
            self.cfg.update('configopts', "-DREGRESSIONTEST_PATH='%%(builddir)s/%s-%%(version)s' " % prefix)

        # enable OpenMP support if desired
        if self.toolchain.options.get('openmp', None):
            self.cfg.update('configopts', "-DGMX_OPENMP=ON")
        else:
            self.cfg.update('configopts', "-DGMX_OPENMP=OFF")

        imkl_root = get_software_root('imkl')
        if imkl_root:
            # using MKL for FFT, so it will also be used for BLAS/LAPACK
            imkl_include = os.path.join(imkl_root, 'mkl', 'include')
            self.cfg.update('configopts', '-DGMX_FFT_LIBRARY=mkl -DMKL_INCLUDE_DIR="%s" ' % imkl_include)
            libs = os.getenv('LAPACK_STATIC_LIBS').split(',')
            mkl_libs = [os.path.join(os.getenv('LAPACK_LIB_DIR'), lib) for lib in libs if lib != 'libgfortran.a']
            mkl_libs = ['-Wl,--start-group'] + mkl_libs + ['-Wl,--end-group -lpthread -lm -ldl']
            self.cfg.update('configopts', '-DMKL_LIBRARIES="%s" ' % ';'.join(mkl_libs))
        else:
            for libname in ['BLAS', 'LAPACK']:
                libdir = os.getenv('%s_LIB_DIR' % libname)
                if self.toolchain.toolchain_family() == toolchain.CRAYPE:
                    libsci_mpi_mp_lib = glob.glob(os.path.join(libdir, 'libsci_*_mpi_mp.a'))
                    if libsci_mpi_mp_lib:
                        self.cfg.update('configopts', '-DGMX_%s_USER=%s' % (libname, libsci_mpi_mp_lib[0]))
                    else:
                        raise EasyBuildError("Failed to find libsci library to link with for %s", libname)
                else:
                    # -DGMX_BLAS_USER & -DGMX_LAPACK_USER require full path to library
                    libs = os.getenv('%s_STATIC_LIBS' % libname).split(',')
                    libpaths = [os.path.join(libdir, lib) for lib in libs if lib != 'libgfortran.a']
                    self.cfg.update('configopts', '-DGMX_%s_USER=%s' % (libname, ';'.join(libpaths)))
                    # if libgfortran.a is listed, make sure it gets linked in too, to avoid linking issues
                    if 'libgfortran.a' in libs:
                        env.setvar('LDFLAGS', "%s -lgfortran -lm" % os.environ.get('LDFLAGS', ''))

        # no more GSL support in GROMACS 5.x, see http://redmine.gromacs.org/issues/1472
        if LooseVersion(self.version) < LooseVersion('5.0'):
            # enable GSL when it's provided
            if get_software_root('GSL'):
                self.cfg.update('configopts', "-DGMX_GSL=ON")
            else:
                self.cfg.update('configopts', "-DGMX_GSL=OFF")

        # include flags for linking to zlib/XZ in $LDFLAGS if they're listed as a dep;
        # this is important for the tests, to correctly link against libxml2
        for dep, link_flag in [('XZ', '-llzma'), ('zlib', '-lz')]:
            root = get_software_root(dep)
            if root:
                libdir = get_software_libdir(dep)
                ldflags = os.environ.get('LDFLAGS', '')
                env.setvar('LDFLAGS', "%s -L%s %s" % (ldflags, os.path.join(root, libdir), link_flag))

        # complete configuration with configure_method of parent
        out = super(EB_GROMACS, self).configure_step()

        # for recent GROMACS versions, make very sure that a decent BLAS, LAPACK and FFT is found and used
        if LooseVersion(self.version) >= LooseVersion('4.6.5'):
            patterns = [
                r"Using external FFT library - \S*",
                r"Looking for dgemm_ - found",
                r"Looking for cheev_ - found",
            ]
            for pattern in patterns:
                regex = re.compile(pattern, re.M)
                if not regex.search(out):
                    raise EasyBuildError("Pattern '%s' not found in GROMACS configuration output.", pattern)
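# When linking against MKL, the libraries are wrapped in -Wl,--start-group ...
# -Wl,--end-group so the linker re-scans the group and resolves the circular
# symbol dependencies between the MKL archives. A hypothetical resulting CMake
# option (paths shortened and made up for illustration):
#
#     -DMKL_LIBRARIES="-Wl,--start-group;/path/libmkl_intel_lp64.a;/path/libmkl_core.a;-Wl,--end-group -lpthread -lm -ldl"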
def configure_step(self):
    """Configure Python package build/install."""
    if self.python_cmd is None:
        self.prepare_python()

    if self.sitecfg is not None:
        # used by some extensions, like numpy, to find certain libs
        finaltxt = self.sitecfg
        if self.sitecfglibdir:
            repl = self.sitecfglibdir
            finaltxt = finaltxt.replace('SITECFGLIBDIR', repl)

        if self.sitecfgincdir:
            repl = self.sitecfgincdir
            finaltxt = finaltxt.replace('SITECFGINCDIR', repl)

        self.log.debug("Using %s: %s" % (self.sitecfgfn, finaltxt))
        try:
            if os.path.exists(self.sitecfgfn):
                txt = open(self.sitecfgfn).read()
                self.log.debug("Found %s: %s" % (self.sitecfgfn, txt))

            config = open(self.sitecfgfn, 'w')
            config.write(finaltxt)
            config.close()
        except IOError:
            raise EasyBuildError("Creating %s failed", self.sitecfgfn)

    # conservatively auto-enable checking of $LDSHARED if it is not explicitly enabled or disabled;
    # only do this for sufficiently recent Python versions (>= 3.7, or Python 2.x >= 2.7.15)
    if self.cfg.get('check_ldshared') is None:
        pyver = det_python_version(self.python_cmd)
        recent_py2 = pyver.startswith('2') and LooseVersion(pyver) >= LooseVersion('2.7.15')
        if recent_py2 or LooseVersion(pyver) >= LooseVersion('3.7'):
            self.log.info("Checking of $LDSHARED auto-enabled for sufficiently recent Python version %s", pyver)
            self.cfg['check_ldshared'] = True
        else:
            self.log.info("Not auto-enabling checking of $LDSHARED, Python version %s is not recent enough", pyver)

    # ensure that $LDSHARED uses $CC
    if self.cfg.get('check_ldshared', False):
        curr_cc = os.getenv('CC')
        python_ldshared = get_config_vars('LDSHARED')[0]
        if python_ldshared and curr_cc:
            if python_ldshared.split(' ')[0] == curr_cc:
                self.log.info("Python's value for $LDSHARED ('%s') uses current $CC value ('%s'), not touching it",
                              python_ldshared, curr_cc)
            else:
                self.log.info("Python's value for $LDSHARED ('%s') doesn't use current $CC value ('%s'), fixing",
                              python_ldshared, curr_cc)
                env.setvar("LDSHARED", curr_cc + " -shared")
        else:
            if curr_cc:
                self.log.info("No $LDSHARED found for Python, setting to '%s -shared'", curr_cc)
                env.setvar("LDSHARED", curr_cc + " -shared")
            else:
                self.log.info("No value set for $CC, so not touching $LDSHARED either")

    # create log entries for the Python command being used, for debugging
    run_cmd("%s -V" % self.python_cmd, verbose=False, trace=False)
    run_cmd("%s -c 'import sys; print(sys.executable)'" % self.python_cmd, verbose=False, trace=False)

    # don't add user site directory to sys.path (equivalent to python -s);
    # see https://www.python.org/dev/peps/pep-0370/
    env.setvar('PYTHONNOUSERSITE', '1', verbose=False)
    run_cmd("%s -c 'import sys; print(sys.path)'" % self.python_cmd, verbose=False, trace=False)
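# The $LDSHARED fixup above keeps Python extension builds consistent with the
# toolchain compiler; in isolation, the decision looks roughly like this
# (a sketch, with illustrative values; the real code also logs each branch):
#
#     from distutils.sysconfig import get_config_vars
#     ldshared = get_config_vars('LDSHARED')[0]   # e.g. "gcc -pthread -shared"
#     cc = os.getenv('CC')                        # e.g. "icc"
#     if cc and ldshared and ldshared.split(' ')[0] != cc:
#         os.environ['LDSHARED'] = cc + ' -shared'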