def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
    """Populate TCLLIBPATH with this tcl's builtin library directory and
    the library directory of every tcl extension the dependent depends
    on. For further info see: https://wiki.tcl.tk/1787
    """
    # Since we set TCLLIBPATH, the matching tcl must also be found in
    # the build environment; otherwise a system-provided tcl could run
    # against the standard libraries of this Spack-built tcl. Issue
    # #7128 describes the analogous situation for python.
    bin_dir = os.path.dirname(self.command.path)
    if not is_system_path(bin_dir):
        spack_env.prepend_path('PATH', bin_dir)

    lib_dirs = [join_path(self.prefix, self.tcl_builtin_lib_dir)]
    for dep in dependent_spec.traverse(deptype=('build', 'run', 'test')):
        if dep.package.extends(self.spec):
            lib_dirs.append(join_path(dep.prefix, self.tcl_lib_dir))

    # WARNING: $TCLLIBPATH is a Tcl list, so its entries must be
    # *space* separated — it is *not* a platform-specific
    # colon/semicolon separated env list (https://wiki.tcl.tk/1787).
    spack_env.set('TCLLIBPATH', ' '.join(lib_dirs))

    # The run-time environment only gets the dependent's own tcl lib
    # directory, prepended to TCLLIBPATH.
    if dependent_spec.package.extends(self.spec):
        run_env.prepend_path(
            'TCLLIBPATH',
            join_path(dependent_spec.prefix, self.tcl_lib_dir),
            separator=' ')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
    """Set PYTHONPATH to include the site-packages directory of the
    extension and of any other python extensions it depends on.
    """
    # Pinning PYTHONHOME means the corresponding python must also be
    # found in the build environment; otherwise a system-provided
    # python could run against the standard libraries of this
    # Spack-built python (see issue #7128).
    spack_env.set('PYTHONHOME', self.home)

    interp_dir = os.path.dirname(self.command.path)
    if not is_system_path(interp_dir):
        spack_env.prepend_path('PATH', interp_dir)

    site_dirs = [
        join_path(dep.prefix, self.site_packages_dir)
        for dep in dependent_spec.traverse(deptype=('build', 'run', 'test'))
        if dep.package.extends(self.spec)
    ]
    spack_env.set('PYTHONPATH', ':'.join(site_dirs))

    # The run-time environment only gets the dependent's own
    # site-packages directory, prepended to PYTHONPATH.
    if dependent_spec.package.extends(self.spec):
        run_env.prepend_path(
            'PYTHONPATH',
            join_path(dependent_spec.prefix, self.site_packages_dir))
def add_include_path(dep_name):
    # Append the named dependency's include directory to the compiler
    # wrapper's search list, skipping system locations that the
    # compiler already searches on its own.
    inc_dir = self.spec[dep_name].prefix.include
    if is_system_path(inc_dir):
        return
    env.append_path('SPACK_INCLUDE_DIRS', inc_dir)
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may affect
    the build. It also involves setting environment variables used by
    Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings

    Returns:
        The modified build environment object.
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)

    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))

        # Fall back on the conventional lib/lib64 directories when the
        # package does not advertise its libraries.
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)

        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)

        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))

    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]

    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)

    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)

    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)

    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))

    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)

    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)

    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path,
        os.path.dirname(pkg.compiler.link_paths['cc']))
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)

    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)

    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)

    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        # BUG FIX: Executable('ccache') does not probe PATH and the
        # resulting object is always truthy, so the old
        # `if not ccache:` check could never fire and a missing ccache
        # only surfaced later, at compile time. Probe PATH explicitly
        # so the configured-but-absent case fails early and clearly.
        import shutil
        if shutil.which('ccache') is None:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, Executable('ccache'))

    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)

    return env
def test_is_system_path():
    """is_system_path() accepts well-known system dirs, rejects others."""
    system_dir = '/usr/bin'
    non_system_dir = '/nonsense_path/bin'
    assert envutil.is_system_path(system_dir)
    assert not envutil.is_system_path(non_system_dir)
def configure_args(self):
    """Build the list of configure arguments for HDF5."""
    spec = self.spec

    # '--enable-unsupported' does not actually enable any features: it
    # only *allows* the user to pass certain combinations of other
    # arguments (it skips a sanity check in configure), so it doesn't
    # merit a variant and is always passed.
    args = ['--enable-unsupported', '--enable-symbols=yes']

    # Do not specify the prefix of zlib if it is in a system directory
    # (see https://github.com/spack/spack/pull/21900).
    zlib_prefix = spec['zlib'].prefix
    zlib_value = 'yes' if is_system_path(zlib_prefix) else zlib_prefix
    args.append('--with-zlib={0}'.format(zlib_value))

    for variant in ('threadsafe', 'cxx', 'hl', 'fortran', 'java'):
        args += self.enable_or_disable(variant)
    args += self.with_or_without('szip')

    api = spec.variants['api'].value
    if api != 'none':
        args.append('--with-default-api-version=' + api)

    debug = '+debug' in spec
    if spec.satisfies('@1.10:'):
        args.append('--enable-build-mode=' +
                    ('debug' if debug else 'production'))
    else:
        args.append('--enable-debug=all' if debug else '--enable-production')
        # '--enable-fortran2003' no longer exists as of version 1.10.0
        if '+fortran' in spec:
            args.append('--enable-fortran2003')
        else:
            args.append('--disable-fortran2003')

    if '+shared' in spec:
        args.append('--enable-shared')
    else:
        args.append('--disable-shared')
        args.append('--enable-static-exec')

    if '+pic' in spec:
        args += [
            'CFLAGS=' + self.compiler.cc_pic_flag,
            'CXXFLAGS=' + self.compiler.cxx_pic_flag,
            'FCFLAGS=' + self.compiler.fc_pic_flag,
        ]

    # The Fujitsu compiler does not add the Fortran runtime to the
    # rpath, so link it in explicitly.
    if '+fortran %fj' in spec:
        args.append('LDFLAGS=-lfj90i -lfj90f -lfjsrcinfo -lelf')

    if '+mpi' in spec:
        # The HDF5 configure script warns if cxx and mpi are enabled
        # together. There doesn't seem to be a real reason for this,
        # except that parts of the MPI interface are not accessible via
        # the C++ interface. Since they are still accessible via the C
        # interface, this is not actually a problem.
        args += ['--enable-parallel', 'CC=%s' % spec['mpi'].mpicc]
        if '+cxx' in spec:
            args.append('CXX=%s' % spec['mpi'].mpicxx)
        if '+fortran' in spec:
            args.append('FC=%s' % spec['mpi'].mpifc)

    return args
def yes_or_prefix(spec_name):
    # Let configure locate system-provided packages on its own ('yes');
    # pass an explicit prefix only for Spack-installed dependencies.
    prefix = self.spec[spec_name].prefix
    if is_system_path(prefix):
        return 'yes'
    return prefix
def configure_args(self):
    """Build the configure argument list for CDO.

    Collects ``--with/--without/--enable/--disable`` switches for each
    optional dependency, accumulating any extra compiler/linker flags
    in ``flags`` and appending them as ``VAR=value`` assignments at the
    end of the returned list.
    """
    config_args = []
    # Maps env-var name (LDFLAGS, LIBS, CPPFLAGS) -> list of flag strings.
    flags = defaultdict(list)

    def yes_or_prefix(spec_name):
        # 'yes' lets configure find a system-provided package on its
        # own; otherwise pass the Spack-installed prefix explicitly.
        prefix = self.spec[spec_name].prefix
        return 'yes' if is_system_path(prefix) else prefix

    if '+netcdf' in self.spec:
        config_args.append('--with-netcdf=' + yes_or_prefix('netcdf-c'))
        # We need to make sure that the libtool script of libcdi - the
        # internal library of CDO - finds the correct version of hdf5.
        # Note that the argument of --with-hdf5 is not passed to the
        # configure script of libcdi, therefore we have to provide
        # additional flags regardless of whether hdf5 support is enabled.
        hdf5_spec = self.spec['hdf5']
        if not is_system_path(hdf5_spec.prefix):
            flags['LDFLAGS'].append(self.spec['hdf5'].libs.search_flags)
    else:
        config_args.append('--without-netcdf')

    # GRIB2 support comes from exactly one of two providers
    # (eccodes or grib-api), selected by the 'grib2' variant.
    if self.spec.variants['grib2'].value == 'eccodes':
        if self.spec.satisfies('@1.9:'):
            config_args.append('--with-eccodes=' + yes_or_prefix('eccodes'))
            config_args.append('--without-grib_api')
        else:
            # Pre-1.9 CDO has no --with-eccodes; use the grib_api
            # compatibility interface and inject eccodes' libs by hand.
            config_args.append('--with-grib_api=yes')
            eccodes_spec = self.spec['eccodes']
            eccodes_libs = eccodes_spec.libs
            flags['LIBS'].append(eccodes_libs.link_flags)
            if not is_system_path(eccodes_spec.prefix):
                flags['LDFLAGS'].append(eccodes_libs.search_flags)
    elif self.spec.variants['grib2'].value == 'grib-api':
        config_args.append('--with-grib_api=' + yes_or_prefix('grib-api'))
        if self.spec.satisfies('@1.9:'):
            config_args.append('--without-eccodes')
    else:
        config_args.append('--without-grib_api')
        if self.spec.satisfies('@1.9:'):
            config_args.append('--without-eccodes')

    if '+external-grib1' in self.spec:
        config_args.append('--disable-cgribex')
    else:
        config_args.append('--enable-cgribex')

    if '+szip' in self.spec:
        config_args.append('--with-szlib=' + yes_or_prefix('szip'))
    else:
        config_args.append('--without-szlib')

    config_args += self.with_or_without('hdf5',
                                        activation_value=yes_or_prefix)
    config_args += self.with_or_without(
        'udunits2',
        activation_value=lambda x: yes_or_prefix('udunits'))

    if '+libxml2' in self.spec:
        libxml2_spec = self.spec['libxml2']
        if is_system_path(libxml2_spec.prefix):
            config_args.append('--with-libxml2=yes')
            # Spack does not inject the header search flag in this case,
            # which is still required, unless libxml2 is installed to
            # '/usr' (handled by the configure script of CDO).
            if libxml2_spec.prefix != '/usr':
                flags['CPPFLAGS'].append(
                    libxml2_spec.headers.include_flags)
        else:
            config_args.append('--with-libxml2=' + libxml2_spec.prefix)
    else:
        config_args.append('--without-libxml2')

    config_args += self.with_or_without('proj',
                                        activation_value=yes_or_prefix)
    config_args += self.with_or_without('curl',
                                        activation_value=yes_or_prefix)
    config_args += self.with_or_without('magics',
                                        activation_value=yes_or_prefix)
    config_args += self.with_or_without('fftw3')

    config_args += self.enable_or_disable('openmp')

    # Starting version 1.9.0 CDO is a C++ program but it uses the C
    # interface of HDF5 without the parallel features. To avoid
    # unnecessary dependencies on mpi's cxx library, we need to set the
    # following flags. This works for OpenMPI, MPICH, MVAPICH, Intel
    # MPI, IBM Spectrum MPI, bullx MPI, and Cray MPI.
    if self.spec.satisfies('@1.9:+hdf5^hdf5+mpi'):
        flags['CPPFLAGS'].append('-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX')

    # Flush accumulated flags into configure as VAR='...' assignments.
    config_args.extend([
        '{0}={1}'.format(var, ' '.join(val))
        for var, val in flags.items()
    ])

    return config_args