Code example #1
File: stage.py  Project: justintoo/spack
    def create(self):
        """Creates the stage directory.

        If get_tmp_root() is None, the stage directory is created
        directly under spack.stage_path, otherwise this will attempt to
        create a stage in a temporary directory and link it into
        spack.stage_path.

        Spack will use the first writable location in spack.tmp_dirs
        to create a stage. If there is no valid location in tmp_dirs,
        fall back to making the stage inside spack.stage_path.

        """
        # Create the top-level stage directory
        mkdirp(spack.stage_path)
        remove_if_dead_link(self.path)

        # If a tmp_root exists then create a directory there and then link it
        # in the stage area, otherwise create the stage directory in self.path
        if self._need_to_create_path():
            tmp_root = get_tmp_root()
            if tmp_root is not None:
                tmp_dir = tempfile.mkdtemp('', _stage_prefix, tmp_root)
                tty.debug('link %s -> %s' % (self.path, tmp_dir))
                os.symlink(tmp_dir, self.path)
            else:
                mkdirp(self.path)
        # Make sure we can actually do something with the stage we made.
        ensure_access(self.path)
Code example #2
File: stage.py  Project: LLNL/spack
    def create(self):
        """Creates the stage directory.

        If get_tmp_root() is None, the stage directory is created
        directly under spack.paths.stage_path, otherwise this will attempt to
        create a stage in a temporary directory and link it into
        spack.paths.stage_path.

        """
        # Create the top-level stage directory
        mkdirp(spack.paths.stage_path)
        remove_if_dead_link(self.path)

        # If a tmp_root exists then create a directory there and then link it
        # in the stage area, otherwise create the stage directory in self.path
        if self._need_to_create_path():
            tmp_root = get_tmp_root()
            if tmp_root is not None:
                # tempfile.mkdtemp already sets mode 0700
                tmp_dir = tempfile.mkdtemp('', _stage_prefix, tmp_root)
                tty.debug('link %s -> %s' % (self.path, tmp_dir))
                os.symlink(tmp_dir, self.path)
            else:
                # emulate file permissions for tempfile.mkdtemp
                mkdirp(self.path, mode=stat.S_IRWXU)
        # Make sure we can actually do something with the stage we made.
        ensure_access(self.path)
        self.created = True
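The create() above boils down to: make a private temporary directory, then symlink it into the stage area. A minimal standalone sketch using only the standard library; the paths and prefix are hypothetical stand-ins for spack.paths.stage_path, get_tmp_root() and _stage_prefix:

import os
import tempfile

stage_path = '/tmp/demo-stage'        # stand-in for spack.paths.stage_path
tmp_root = tempfile.gettempdir()      # stand-in for get_tmp_root()

os.makedirs(stage_path, exist_ok=True)
link_path = os.path.join(stage_path, 'pkg-1.0')

# tempfile.mkdtemp creates the directory with mode 0700 already
tmp_dir = tempfile.mkdtemp('', 'spack-stage-', tmp_root)
os.symlink(tmp_dir, link_path)
assert os.path.isdir(link_path)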
Code example #3
File: cray.py  Project: alfredo-gimenez/spack
    def _default_target_from_env(self):
        '''Set and return the default CrayPE target loaded in a clean login
        session.

        A bash subshell is launched with a wiped environment and the list of
        loaded modules is parsed for the first acceptable CrayPE target.
        '''
        # Based on the incantation:
        # echo "$(env - USER=$USER /bin/bash -l -c 'module list -lt')"
        if getattr(self, 'default', None) is None:
            env = which('env')
            env.add_default_arg('-')
            # CAUTION - $USER is generally needed in the sub-environment.
            # There may be other variables needed for general success.
            output = env('USER=%s' % os.environ['USER'],
                         'HOME=%s' % os.environ['HOME'],
                         '/bin/bash', '--noprofile', '--norc', '-c',
                         '. /etc/profile; module list -lt',
                         output=str, error=str)
            self._defmods = _get_modules_in_modulecmd_output(output)
            targets = []
            _fill_craype_targets_from_modules(targets, self._defmods)
            self.default = targets[0] if targets else None
            tty.debug("Found default modules:",
                      *["     %s" % mod for mod in self._defmods])
        return self.default
Code example #4
File: database.py  Project: LLNL/spack
    def __init__(self, root, db_dir=None):
        """Create a Database for Spack installations under ``root``.

        A Database is a cache of Specs data from ``$prefix/spec.yaml``
        files in Spack installation directories.

        By default, Database files (data and lock files) are stored
        under ``root/.spack-db``, which is created if it does not
        exist.  This is the ``db_dir``.

        The Database will attempt to read an ``index.json`` file in
        ``db_dir``.  If it does not find one, it will fall back to read
        an ``index.yaml`` if one is present.  If that does not exist, it
        will create a database when needed by scanning the entire
        Database root for ``spec.yaml`` files according to Spack's
        ``DirectoryLayout``.

        Caller may optionally provide a custom ``db_dir`` parameter
        where data will be stored.  This is intended to be used for
        testing the Database class.

        """
        self.root = root

        if db_dir is None:
            # If the db_dir is not provided, default to within the db root.
            self._db_dir = os.path.join(self.root, _db_dirname)
        else:
            # Allow customizing the database directory location for testing.
            self._db_dir = db_dir

        # Set up layout of database files within the db dir
        self._old_yaml_index_path = os.path.join(self._db_dir, 'index.yaml')
        self._index_path = os.path.join(self._db_dir, 'index.json')
        self._lock_path = os.path.join(self._db_dir, 'lock')

        # This is for other classes to use to lock prefix directories.
        self.prefix_lock_path = os.path.join(self._db_dir, 'prefix_lock')

        # Create needed directories and files
        if not os.path.exists(self._db_dir):
            mkdirp(self._db_dir)

        # initialize rest of state.
        self.db_lock_timeout = (
            spack.config.get('config:db_lock_timeout') or _db_lock_timeout)
        self.package_lock_timeout = (
            spack.config.get('config:package_lock_timeout') or None)
        tty.debug('DATABASE LOCK TIMEOUT: {0}s'.format(
                  str(self.db_lock_timeout)))
        timeout_format_str = ('{0}s'.format(str(self.package_lock_timeout))
                              if self.package_lock_timeout else 'No timeout')
        tty.debug('PACKAGE LOCK TIMEOUT: {0}'.format(
                  str(timeout_format_str)))
        self.lock = Lock(self._lock_path,
                         default_timeout=self.db_lock_timeout)
        self._data = {}

        # whether there was an error at the start of a read transaction
        self._error = None
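A minimal usage sketch for this constructor; the root path is hypothetical, and in real Spack the Database is normally built from the install tree rather than instantiated by hand:

db = Database('/opt/spack')            # hypothetical install-tree root

# database files land under <root>/.spack-db
print(db._index_path)    # /opt/spack/.spack-db/index.json
print(db._lock_path)     # /opt/spack/.spack-db/lock

# tests can redirect the files with the optional db_dir
test_db = Database('/opt/spack', db_dir='/tmp/spack-test-db')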
Code example #5
File: stage.py  Project: AaronTHolt/spack
    def fetch(self):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = [self.fetcher]

        # TODO: move mirror logic out of here and clean it up!
        if self.mirror_path:
            urls = ["%s/%s" % (m, self.mirror_path) for m in _get_mirrors()]

            digest = None
            if isinstance(self.fetcher, fs.URLFetchStrategy):
                digest = self.fetcher.digest
            fetchers = [fs.URLFetchStrategy(url, digest)
                        for url in urls] + fetchers
            for f in fetchers:
                f.set_stage(self)

        for fetcher in fetchers:
            try:
                fetcher.fetch()
                break
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
Code example #6
File: environment.py  Project: LLNL/spack
    def _get_environment_specs(self, recurse_dependencies=True):
        """Returns the specs of all the packages in an environment.

        If these specs appear under different user_specs, only one copy
        is added to the list returned.
        """
        package_to_spec = {}
        spec_list = list()

        for spec_hash in self.concretized_order:
            spec = self.specs_by_hash[spec_hash]

            specs = (spec.traverse(deptype=('link', 'run'))
                     if recurse_dependencies else (spec,))

            for dep in specs:
                prior = package_to_spec.get(dep.name)
                if prior and prior != dep:
                    tty.debug("{0} takes priority over {1}"
                              .format(package_to_spec[dep.name].format(),
                                      dep.format()))
                else:
                    package_to_spec[dep.name] = dep
                    spec_list.append(dep)

        return spec_list
Code example #7
File: filesystem.py  Project: matzke1/spack
def copy(src, dest, _permissions=False):
    """Copies the file *src* to the file or directory *dest*.

    If *dest* specifies a directory, the file will be copied into *dest*
    using the base filename from *src*.

    Parameters:
        src (str): the file to copy
        dest (str): the destination file or directory
        _permissions (bool): for internal use only
    """
    if _permissions:
        tty.debug('Installing {0} to {1}'.format(src, dest))
    else:
        tty.debug('Copying {0} to {1}'.format(src, dest))

    # Expand dest to its eventual full path if it is a directory.
    if os.path.isdir(dest):
        dest = join_path(dest, os.path.basename(src))

    shutil.copy(src, dest)

    if _permissions:
        set_install_permissions(dest)
        copy_mode(src, dest)
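Usage of copy() follows shutil.copy: dest may name the new file or an existing directory (paths hypothetical):

copy('/tmp/build/README', '/opt/pkg/doc/README')   # dest names the file
copy('/tmp/build/README', '/opt/pkg/doc')          # dest is a directory;
                                                   # lands at /opt/pkg/doc/README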
Code example #8
File: versions.py  Project: LLNL/spack
def versions(parser, args):
    pkg = spack.repo.get(args.package)

    tty.msg('Safe versions (already checksummed):')

    safe_versions = pkg.versions

    if not safe_versions:
        print('  Found no versions for {0}'.format(pkg.name))
        tty.debug('Manually add versions to the package.')
    else:
        colify(sorted(safe_versions, reverse=True), indent=2)

    tty.msg('Remote versions (not yet checksummed):')

    fetched_versions = pkg.fetch_remote_versions()
    remote_versions = set(fetched_versions).difference(safe_versions)

    if not remote_versions:
        if not fetched_versions:
            print('  Found no versions for {0}'.format(pkg.name))
            tty.debug('Check the list_url and list_depth attributes of the '
                      'package to help Spack find versions.')
        else:
            print('  Found no unchecksummed versions for {0}'.format(pkg.name))
    else:
        colify(sorted(remote_versions, reverse=True), indent=2)
Code example #9
File: modules.py  Project: Exteris/spack
    def blacklisted(self):
        configuration = CONFIGURATION.get(self.name, {})
        whitelist_matches = [x
                             for x in configuration.get('whitelist', [])
                             if self.spec.satisfies(x)]
        blacklist_matches = [x
                             for x in configuration.get('blacklist', [])
                             if self.spec.satisfies(x)]
        if whitelist_matches:
            message = '\tWHITELIST : %s [matches : ' % self.spec.cshort_spec
            for rule in whitelist_matches:
                message += '%s ' % rule
            message += ' ]'
            tty.debug(message)

        if blacklist_matches:
            message = '\tBLACKLIST : %s [matches : ' % self.spec.cshort_spec
            for rule in blacklist_matches:
                message += '%s ' % rule
            message += ' ]'
            tty.debug(message)

        if not whitelist_matches and blacklist_matches:
            return True

        return False
Code example #10
File: config.py  Project: d-tk/spack
def _read_config_file(filename, schema):
    """Read a YAML configuration file."""
    # Ignore nonexistent files.
    if not os.path.exists(filename):
        return None

    elif not os.path.isfile(filename):
        raise ConfigFileError(
            "Invlaid configuration. %s exists but is not a file." % filename)

    elif not os.access(filename, os.R_OK):
        raise ConfigFileError("Config file is not readable: %s" % filename)

    try:
        tty.debug("Reading config file %s" % filename)
        with open(filename) as f:
            data = syaml.load(f)

        if data:
            validate_section(data, schema)
        return data

    except MarkedYAMLError as e:
        raise ConfigFileError(
            "Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))

    except IOError as e:
        raise ConfigFileError(
            "Error reading configuration file %s: %s" % (filename, str(e)))
Code example #11
File: filesystem.py  Project: citibeth/ectl
def install_tree(src, dest, **kwargs):
    """Manually install a file to a particular location."""
    tty.debug("Installing %s to %s" % (src, dest))
    shutil.copytree(src, dest, **kwargs)

    for s, d in traverse_tree(src, dest, follow_nonexisting=False):
        set_install_permissions(d)
        copy_mode(s, d)
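install_tree() follows shutil.copytree semantics: the destination must not already exist, and extra keyword arguments pass straight through (paths hypothetical):

install_tree('/tmp/build/share', '/opt/pkg/share')
install_tree('/tmp/build/etc', '/opt/pkg/etc', symlinks=True)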
Code example #12
File: compiler.py  Project: AaronTHolt/spack
 def check(key):
     try:
         full_path, prefix, suffix = key
         version = detect_version(full_path)
         return (version, prefix, suffix, full_path)
     except ProcessError as e:
         tty.debug("Couldn't get version for compiler %s" % full_path, e)
         return None
Code example #13
File: executable.py  Project: jgalarowicz/spack
    def __call__(self, *args, **kwargs):
        """Run the executable with subprocess.check_output, return output."""
        return_output = kwargs.get("return_output", False)
        fail_on_error = kwargs.get("fail_on_error", True)
        ignore_errors = kwargs.get("ignore_errors", ())

        output        = kwargs.get("output", sys.stdout)
        error         = kwargs.get("error", sys.stderr)
        input         = kwargs.get("input", None)

        def streamify(arg, mode):
            if isinstance(arg, basestring):
                return open(arg, mode), True
            elif arg is None and mode != 'r':
                return open(os.devnull, mode), True
            return arg, False
        output, ostream = streamify(output, 'w')
        error,  estream = streamify(error,  'w')
        input,  istream = streamify(input,  'r')

        # if they just want to ignore one error code, make it a tuple.
        if isinstance(ignore_errors, int):
            ignore_errors = (ignore_errors,)

        quoted_args = [arg for arg in args if re.search(r'^"|^\'|"$|\'$', arg)]
        if quoted_args:
            tty.warn("Quotes in command arguments can confuse scripts like configure.",
                     "The following arguments may cause problems when executed:",
                     str("\n".join(["    "+arg for arg in quoted_args])),
                     "Quotes aren't needed because spack doesn't use a shell.",
                     "Consider removing them")

        cmd = self.exe + list(args)

        cmd_line = ' '.join(cmd)
        tty.debug(cmd_line)

        try:
            proc = subprocess.Popen(
                cmd,
                stdin=input,
                stderr=error,
                stdout=subprocess.PIPE if return_output else output)
            out, err = proc.communicate()
            self.returncode = proc.returncode

            rc = proc.returncode
            if fail_on_error and rc != 0 and (rc not in ignore_errors):
                raise ProcessError("Command exited with status %d:"
                                   % proc.returncode, cmd_line)
            if return_output:
                return out

        except OSError as e:
            raise ProcessError(
                "%s: %s" % (self.exe[0], e.strerror),
                "Command: " + cmd_line)
Code example #14
File: compiler.py  Project: alfredo-gimenez/spack
    def _find_matches_in_path(cls, compiler_names, detect_version, *path):
        """Finds compilers in the paths supplied.

           Looks for all combinations of ``compiler_names`` with the
           ``prefixes`` and ``suffixes`` defined for this compiler
           class.  If any compilers match the compiler_names,
           prefixes, or suffixes, uses ``detect_version`` to figure
           out what version the compiler is.

           This returns a dict with compilers grouped by (prefix,
           suffix, version) tuples.  This can be further organized by
           find().
        """
        if not path:
            path = get_path('PATH')

        prefixes = [''] + cls.prefixes
        suffixes = [''] + cls.suffixes

        checks = []
        for directory in path:
            if not (os.path.isdir(directory) and
                    os.access(directory, os.R_OK | os.X_OK)):
                continue

            files = os.listdir(directory)
            for exe in files:
                full_path = join_path(directory, exe)

                prod = itertools.product(prefixes, compiler_names, suffixes)
                for pre, name, suf in prod:
                    regex = r'^(%s)%s(%s)$' % (pre, re.escape(name), suf)

                    match = re.match(regex, exe)
                    if match:
                        key = (full_path,) + match.groups()
                        checks.append(key)

        def check(key):
            try:
                full_path, prefix, suffix = key
                version = detect_version(full_path)
                return (version, prefix, suffix, full_path)
            except ProcessError as e:
                tty.debug(
                    "Couldn't get version for compiler %s" % full_path, e)
                return None
            except Exception as e:
                # Catching "Exception" here is fine because it just
                # means something went wrong running a candidate executable.
                tty.debug("Error while executing candidate compiler %s"
                          % full_path,
                          "%s: %s" % (e.__class__.__name__, e))
                return None
Code example #15
File: binary_distribution.py  Project: LLNL/spack
def get_specs(force=False):
    """
    Get spec.yaml files for build caches available on mirrors
    """
    global _cached_specs

    if _cached_specs:
        tty.debug("Using previously-retrieved specs")
        return _cached_specs

    mirrors = spack.config.get('mirrors')
    if len(mirrors) == 0:
        tty.warn("No Spack mirrors are currently configured")
        return {}

    path = str(spack.architecture.sys_type())
    urls = set()
    for key in mirrors:
        url = mirrors[key]
        if url.startswith('file'):
            mirror = url.replace('file://', '') + '/build_cache'
            tty.msg("Finding buildcaches in %s" % mirror)
            if os.path.exists(mirror):
                files = os.listdir(mirror)
                for file in files:
                    if re.search('spec.yaml', file):
                        link = 'file://' + mirror + '/' + file
                        urls.add(link)
        else:
            tty.msg("Finding buildcaches on %s" % url)
            p, links = spider(url + "/build_cache")
            for link in links:
                if re.search("spec.yaml", link) and re.search(path, link):
                    urls.add(link)

    _cached_specs = set()
    for link in urls:
        with Stage(link, name="build_cache", keep=True) as stage:
            if force and os.path.exists(stage.save_filename):
                os.remove(stage.save_filename)
            if not os.path.exists(stage.save_filename):
                try:
                    stage.fetch()
                except fs.FetchError:
                    continue
            with open(stage.save_filename, 'r') as f:
                # read the spec from the build cache file. All specs
                # in build caches are concrete (as they are built) so
                # we need to mark this spec concrete on read-in.
                spec = spack.spec.Spec.from_yaml(f)
                spec._mark_concrete()
                _cached_specs.add(spec)

    return _cached_specs
Code example #16
File: sbang.py  Project: LLNL/spack
def post_install(spec):
    """This hook edits scripts so that they call /bin/bash
    $spack_prefix/bin/sbang instead of something longer than the
    shebang limit.
    """
    if spec.external:
        tty.debug('SKIP: shebang filtering [external package]')
        return

    for directory, _, filenames in os.walk(spec.prefix):
        filter_shebangs_in_directory(directory, filenames)
Code example #17
File: filesystem.py  Project: citibeth/ectl
def install(src, dest):
    """Manually install a file to a particular location."""
    tty.debug("Installing %s to %s" % (src, dest))

    # Expand dest to its eventual full path if it is a directory.
    if os.path.isdir(dest):
        dest = join_path(dest, os.path.basename(src))

    shutil.copy(src, dest)
    set_install_permissions(dest)
    copy_mode(src, dest)
Code example #18
File: module.py  Project: AaronTHolt/spack
def module_refresh():
    """Regenerate all module files for installed packages known to
       spack (some packages may no longer exist)."""
    specs = [s for s in spack.db.installed_known_package_specs()]

    for name, cls in module_types.items():
        tty.msg("Regenerating %s module files." % name)
        if os.path.isdir(cls.path):
            shutil.rmtree(cls.path, ignore_errors=False)
        mkdirp(cls.path)
        for spec in specs:
            tty.debug("   Writing file for %s." % spec)
            cls(spec).write()
Code example #19
File: stage.py  Project: Exteris/spack
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_path:
            mirrors = spack.config.get_config('mirrors')

            # Join URLs of mirror roots with mirror paths. urljoin()
            # strips everything past the final '/' in the root, so we
            # add a '/' if it is not present.
            mirror_roots = [root if root.endswith('/') else root + '/'
                            for root in mirrors.values()]
            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(0, fs.URLFetchStrategy(url, digest))

        for fetcher in fetchers:
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            errMessage = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(errMessage, None)
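The trailing else on the fetch loop above is Python's loop-else: it runs only when the for loop finishes without hitting break, i.e. when every fetcher failed. A minimal illustration:

for fetcher in []:          # nothing ever succeeds here
    break
else:
    print('no fetcher succeeded')   # this branch runs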
Code example #20
File: url.py  Project: trws/spack
def strip_query_and_fragment(path):
    try:
        components = urlsplit(path)
        stripped = components[:3] + (None, None)

        query, frag = components[3:5]
        suffix = ''
        if query:
            suffix += '?' + query
        if frag:
            suffix += '#' + frag

        return (urlunsplit(stripped), suffix)

    except ValueError:
        tty.debug("Got error parsing path %s" % path)
        return (path, '')  # Ignore URL parse errors here
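A worked example for strip_query_and_fragment():

url = 'https://example.com/pkg-1.0.tar.gz?raw=true#md5'
base, suffix = strip_query_and_fragment(url)
# base   == 'https://example.com/pkg-1.0.tar.gz'
# suffix == '?raw=true#md5'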
Code example #21
File: compiler.py  Project: justintoo/spack
 def check(key):
     try:
         full_path, prefix, suffix = key
         version = detect_version(full_path)
         return (version, prefix, suffix, full_path)
     except ProcessError as e:
         tty.debug(
             "Couldn't get version for compiler %s" % full_path, e)
         return None
     except Exception as e:
         # Catching "Exception" here is fine because it just
         # means something went wrong running a candidate executable.
         tty.debug("Error while executing candidate compiler %s"
                   % full_path,
                   "%s: %s" % (e.__class__.__name__, e))
         return None
Code example #22
File: modules.py  Project: Exteris/spack
def filter_blacklisted(specs, module_name):
    """
    Given a sequence of specs, filters the ones that are blacklisted in the
    module configuration file.

    Args:
        specs: sequence of spec instances
        module_name: type of module file objects

    Yields:
        non-blacklisted specs
    """
    for x in specs:
        if module_types[module_name](x).blacklisted:
            tty.debug('\tFILTER : %s' % x)
            continue
        yield x
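Because filter_blacklisted() is a generator, specs are filtered lazily as you iterate. A usage sketch with a hypothetical consumer:

for spec in filter_blacklisted(specs, 'tcl'):
    # only specs whose tcl module is not blacklisted arrive here
    write_module_file(spec)   # hypothetical consumer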
Code example #23
File: relocate.py  Project: LLNL/spack
def get_existing_elf_rpaths(path_name):
    """
    Return the RPATHS returned by patchelf --print-rpath path_name
    as a list of strings.
    """
    if platform.system() == 'Linux':
        patchelf = Executable(get_patchelf())
        try:
            output = patchelf('--print-rpath', '%s' %
                              path_name, output=str, error=str)
            return output.rstrip('\n').split(':')
        except ProcessError as e:
            tty.debug('patchelf --print-rpath produced an error on %s' %
                      path_name, e)
            return []
    else:
        tty.die('relocation not supported for this platform')
    return
Code example #24
File: versions.py  Project: d-tk/spack
def versions(parser, args):
    pkg = spack.repo.get(args.package)

    safe_versions = pkg.versions
    fetched_versions = pkg.fetch_remote_versions()
    remote_versions = set(fetched_versions).difference(safe_versions)

    tty.msg("Safe versions (already checksummed):")
    colify(sorted(safe_versions, reverse=True), indent=2)

    tty.msg("Remote versions (not yet checksummed):")
    if not remote_versions:
        if not fetched_versions:
            print "  Found no versions for %s" % pkg.name
            tty.debug("Check the list_url and list_depth attribute on the " "package to help Spack find versions.")
        else:
            print "  Found no unckecksummed versions for %s" % pkg.name
    else:
        colify(sorted(remote_versions, reverse=True), indent=2)
Code example #25
File: stage.py  Project: LLNL/spack
def _first_accessible_path(paths):
    """Find a tmp dir that exists that we can access."""
    for path in paths:
        try:
            # try to create the path if it doesn't exist.
            path = canonicalize_path(path)
            mkdirp(path)

            # ensure accessible
            if not can_access(path):
                continue

            # return it if successful.
            return path

        except OSError:
            tty.debug('OSError while checking temporary path: %s' % path)
            continue

    return None
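A usage sketch for _first_accessible_path(); the candidate directories are hypothetical (canonicalize_path expands variables such as $user):

candidates = ['/dev/shm/$user', '/tmp/$user', '/var/tmp/$user']
tmp_root = _first_accessible_path(candidates)
if tmp_root is None:
    # nothing was creatable and writable; fall back somewhere safe
    tmp_root = os.getcwd()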
Code example #26
File: lock.py  Project: justintoo/spack
    def acquire_read(self, timeout=_default_timeout):
        """Acquires a recursive, shared lock for reading.

        Read and write locks can be acquired and released in arbitrary
        order, but the POSIX lock is held until all local read and
        write locks are released.

        Returns True if it is the first acquire and actually acquires
        the POSIX lock, False if it is a nested transaction.

        """
        if self._reads == 0 and self._writes == 0:
            tty.debug('READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
                      .format(self))
            self._lock(fcntl.LOCK_SH, timeout=timeout)   # can raise LockError.
            self._reads += 1
            return True
        else:
            self._reads += 1
            return False
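A sketch of the recursive read-lock discipline acquire_read() implements, assuming the matching release_read(); the lock file path is hypothetical:

lock = Lock('/tmp/demo.lock')

assert lock.acquire_read() is True    # first acquire takes the POSIX lock
assert lock.acquire_read() is False   # nested acquire only bumps a counter
lock.release_read()                   # inner release
lock.release_read()                   # outer release drops the POSIX lock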
Code example #27
File: executable.py  Project: AaronTHolt/spack
    def __call__(self, *args, **kwargs):
        """Run the executable with subprocess.check_output, return output."""
        return_output = kwargs.get("return_output", False)
        fail_on_error = kwargs.get("fail_on_error", True)
        error         = kwargs.get("error", sys.stderr)

        quoted_args = [arg for arg in args if re.search(r'^"|^\'|"$|\'$', arg)]
        if quoted_args:
            tty.warn("Quotes in command arguments can confuse scripts like configure.",
                     "The following arguments may cause problems when executed:",
                     str("\n".join(["    "+arg for arg in quoted_args])),
                     "Quotes aren't needed because spack doesn't use a shell.",
                     "Consider removing them")

        cmd = self.exe + list(args)
        tty.debug(" ".join(cmd))

        close_error = False
        try:
            if error is None:
                error = open(os.devnull, 'w')
                close_error = True

            proc = subprocess.Popen(
                cmd,
                stderr=error,
                stdout=subprocess.PIPE if return_output else sys.stdout)
            out, err = proc.communicate()
            self.returncode = proc.returncode

            if fail_on_error and proc.returncode != 0:
                raise ProcessError("command '%s' returned error code %d"
                                   % (" ".join(cmd), proc.returncode))
            if return_output:
                return out

        except subprocess.CalledProcessError as e:
            if fail_on_error:
                raise ProcessError(
                    "command '%s' failed to run." % " ".join(cmd),
                    str(e))
Code example #28
File: clean.py  Project: LLNL/spack
def clean(parser, args):
    # If nothing was set, activate the default
    if not any([args.specs, args.stage, args.downloads, args.misc_cache,
                args.python_cache]):
        args.stage = True

    # Then do the cleaning falling through the cases
    if args.specs:
        specs = spack.cmd.parse_specs(args.specs, concretize=True)
        for spec in specs:
            msg = 'Cleaning build stage [{0}]'
            tty.msg(msg.format(spec.short_spec))
            package = spack.repo.get(spec)
            package.do_clean()

    if args.stage:
        tty.msg('Removing all temporary build stages')
        spack.stage.purge()

    if args.downloads:
        tty.msg('Removing cached downloads')
        spack.caches.fetch_cache.destroy()

    if args.misc_cache:
        tty.msg('Removing cached information on repositories')
        spack.caches.misc_cache.destroy()

    if args.python_cache:
        tty.msg('Removing python cache files')
        for directory in [lib_path, var_path]:
            for root, dirs, files in os.walk(directory):
                for f in files:
                    if f.endswith('.pyc') or f.endswith('.pyo'):
                        fname = os.path.join(root, f)
                        tty.debug('Removing {0}'.format(fname))
                        os.remove(fname)
                for d in dirs:
                    if d == '__pycache__':
                        dname = os.path.join(root, d)
                        tty.debug('Removing {0}'.format(dname))
                        shutil.rmtree(dname)
Code example #29
File: filesystem.py  Project: matzke1/spack
def replace_directory_transaction(directory_name, tmp_root=None):
    """Moves a directory to a temporary space. If the operations executed
    within the context manager don't raise an exception, the directory is
    deleted. If there is an exception, the move is undone.

    Args:
        directory_name (path): absolute path of the directory name
        tmp_root (path): absolute path of the parent directory where to create
            the temporary

    Returns:
        temporary directory where ``directory_name`` has been moved
    """
    # Check the input is indeed a directory with absolute path.
    # Raise before anything is done to avoid moving the wrong directory
    assert os.path.isdir(directory_name), \
        '"directory_name" must be a valid directory'
    assert os.path.isabs(directory_name), \
        '"directory_name" must contain an absolute path'

    directory_basename = os.path.basename(directory_name)

    if tmp_root is not None:
        assert os.path.isabs(tmp_root)

    tmp_dir = tempfile.mkdtemp(dir=tmp_root)
    tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))

    shutil.move(src=directory_name, dst=tmp_dir)
    tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
        directory_name, tmp_dir
    ))

    try:
        yield tmp_dir
    except (Exception, KeyboardInterrupt, SystemExit):
        # Delete what was there, before copying back the original content
        if os.path.exists(directory_name):
            shutil.rmtree(directory_name)
        shutil.move(
            src=os.path.join(tmp_dir, directory_basename),
            dst=os.path.dirname(directory_name)
        )
        tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))

        msg = 'the transactional move of "{0}" failed.'
        raise RuntimeError(msg.format(directory_name))
    else:
        # Otherwise delete the temporary directory
        shutil.rmtree(tmp_dir)
        tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))
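The yield makes replace_directory_transaction() a generator; in Spack it is used as a context manager (the @contextmanager decorator is elided from this excerpt). A usage sketch with a hypothetical rebuild step:

with replace_directory_transaction('/opt/pkg/prefix') as backup_dir:
    # rebuild /opt/pkg/prefix here; if this block raises, the original
    # contents come back from backup_dir and a RuntimeError is raised
    rebuild_prefix('/opt/pkg/prefix')   # hypothetical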
Code example #30
File: lock.py  Project: justintoo/spack
    def release_write(self):
        """Releases a write lock.

        Returns True if the last recursive lock was released, False if
        there are still outstanding locks.

        Does limited correctness checking: if a write lock is released
        when none are held, this will raise an assertion error.

        """
        assert self._writes > 0

        if self._writes == 1 and self._reads == 0:
            tty.debug('WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]'
                      .format(self))
            self._unlock()      # can raise LockError.
            self._writes -= 1
            return True
        else:
            self._writes -= 1
            return False
Code example #31
File: buildcache.py  Project: tomdele/spack
def buildcache_copy(args):
    """Copy a buildcache entry and all its files from one mirror, given as
    '--base-dir', to some other mirror, specified as '--destination-url'.
    The specific buildcache entry to be copied from one location to the
    other is identified using the '--spec-yaml' argument."""
    # TODO: This sub-command should go away once #11117 is merged

    if not args.spec_yaml:
        tty.msg('No spec yaml provided, exiting.')
        sys.exit(1)

    if not args.base_dir:
        tty.msg('No base directory provided, exiting.')
        sys.exit(1)

    if not args.destination_url:
        tty.msg('No destination mirror url provided, exiting.')
        sys.exit(1)

    dest_url = args.destination_url

    if dest_url[0:7] != 'file://' and dest_url[0] != '/':
        tty.msg('Only urls beginning with "file://" or "/" are supported ' +
                'by buildcache copy.')
        sys.exit(1)

    try:
        with open(args.spec_yaml, 'r') as fd:
            spec = Spec.from_yaml(fd.read())
    except Exception as e:
        tty.debug(e)
        tty.error('Unable to concretize spec from yaml {0}'.format(
            args.spec_yaml))
        sys.exit(1)

    dest_root_path = dest_url
    if dest_url[0:7] == 'file://':
        dest_root_path = dest_url[7:]

    build_cache_dir = bindist.build_cache_relative_path()

    tarball_rel_path = os.path.join(build_cache_dir,
                                    bindist.tarball_path_name(spec, '.spack'))
    tarball_src_path = os.path.join(args.base_dir, tarball_rel_path)
    tarball_dest_path = os.path.join(dest_root_path, tarball_rel_path)

    specfile_rel_path = os.path.join(build_cache_dir,
                                     bindist.tarball_name(spec, '.spec.yaml'))
    specfile_src_path = os.path.join(args.base_dir, specfile_rel_path)
    specfile_dest_path = os.path.join(dest_root_path, specfile_rel_path)

    cdashidfile_rel_path = os.path.join(build_cache_dir,
                                        bindist.tarball_name(spec, '.cdashid'))
    cdashid_src_path = os.path.join(args.base_dir, cdashidfile_rel_path)
    cdashid_dest_path = os.path.join(dest_root_path, cdashidfile_rel_path)

    # Make sure directory structure exists before attempting to copy
    os.makedirs(os.path.dirname(tarball_dest_path))

    # Now copy the specfile and tarball files to the destination mirror
    tty.msg('Copying {0}'.format(tarball_rel_path))
    shutil.copyfile(tarball_src_path, tarball_dest_path)

    tty.msg('Copying {0}'.format(specfile_rel_path))
    shutil.copyfile(specfile_src_path, specfile_dest_path)

    # Copy the cdashid file (if exists) to the destination mirror
    if os.path.exists(cdashid_src_path):
        tty.msg('Copying {0}'.format(cdashidfile_rel_path))
        shutil.copyfile(cdashid_src_path, cdashid_dest_path)
Code example #32
File: stage.py  Project: tvandera/spack
 def check(self):
     tty.debug('No checksum needed for DIY.')
Code example #33
    def write(self, overwrite=False):
        """Writes the module file.

        Args:
            overwrite (bool): if True it is fine to overwrite an already
                existing file. If False the operation is skipped and we print
                a warning to the user.
        """
        # Return immediately if the module is blacklisted
        if self.conf.blacklisted:
            msg = '\tNOT WRITING: {0} [BLACKLISTED]'
            tty.debug(msg.format(self.spec.cshort_spec))
            return

        # Print a warning in case I am accidentally overwriting
        # a module file that is already there (name clash)
        if not overwrite and os.path.exists(self.layout.filename):
            message = 'Module file already exists : skipping creation\n'
            message += 'file : {0.filename}\n'
            message += 'spec : {0.spec}'
            tty.warn(message.format(self.layout))
            return

        # If we are here it means it's ok to write the module file
        msg = '\tWRITE: {0} [{1}]'
        tty.debug(msg.format(self.spec.cshort_spec, self.layout.filename))

        # If the directory where the module should reside does not exist
        # create it
        module_dir = os.path.dirname(self.layout.filename)
        if not os.path.exists(module_dir):
            llnl.util.filesystem.mkdirp(module_dir)

        # Get the template for the module
        template_name = self._get_template()
        try:
            env = tengine.make_environment()
            template = env.get_template(template_name)
        except tengine.TemplateNotFound:
            # If the template was not found raise an exception with a little
            # more information
            msg = 'template \'{0}\' was not found for \'{1}\''
            name = type(self).__name__
            msg = msg.format(template_name, name)
            raise ModulesTemplateNotFoundError(msg)

        # Construct the context following the usual hierarchy of updates:
        # 1. start with the default context from the module writer class
        # 2. update with package specific context
        # 3. update with 'modules.yaml' specific context

        context = self.context.to_dict()

        # Attribute from package
        module_name = str(self.module.__name__).split('.')[-1]
        attr_name = '{0}_context'.format(module_name)
        pkg_update = getattr(self.spec.package, attr_name, {})
        context.update(pkg_update)

        # Context key in modules.yaml
        conf_update = self.conf.context
        context.update(conf_update)

        # Render the template
        text = template.render(context)
        # Write it to file
        with open(self.layout.filename, 'w') as f:
            f.write(text)

        # Set the file permissions of the module to match that of the package
        if os.path.exists(self.layout.filename):
            fp.set_permissions_by_spec(self.layout.filename, self.spec)
Code example #34
    def write(self, overwrite=False):
        """
        Writes out a module file for this object.

        This method employs a template pattern and expects derived classes to:
        - override the header property
        - provide formats for autoload, prerequisites and environment changes
        """
        if self.blacklisted:
            return
        tty.debug("\tWRITE : %s [%s]" %
                  (self.spec.cshort_spec, self.file_name))

        module_dir = os.path.dirname(self.file_name)
        if not os.path.exists(module_dir):
            mkdirp(module_dir)

        # Environment modifications guessed by inspecting the
        # installation prefix
        env = inspect_path(self.spec.prefix)

        # Let the extendee/dependency modify their extensions/dependencies
        # before asking for package-specific modifications
        spack_env = EnvironmentModifications()
        # TODO : the code down below is quite similar to
        # TODO : build_environment.setup_package and needs to be factored out
        # TODO : to a single place
        for item in dependencies(self.spec, 'all'):
            package = self.spec[item.name].package
            modules = parent_class_modules(package.__class__)
            for mod in modules:
                set_module_variables_for_package(package, mod)
            set_module_variables_for_package(package, package.module)
            package.setup_dependent_package(self.pkg.module, self.spec)
            package.setup_dependent_environment(spack_env, env, self.spec)

        # Package-specific environment modifications
        set_module_variables_for_package(self.pkg, self.pkg.module)
        self.spec.package.setup_environment(spack_env, env)

        # Parse configuration file
        module_configuration, conf_env = parse_config_options(self)
        env.extend(conf_env)
        filters = module_configuration.get('filter', {}).get(
            'environment_blacklist', {})
        # Build up the module file content
        module_file_content = self.header
        for x in filter_blacklisted(
                module_configuration.pop('autoload', []), self.name):
            module_file_content += self.autoload(x)
        for x in module_configuration.pop('load', []):
            module_file_content += self.autoload(x)
        for x in filter_blacklisted(
                module_configuration.pop('prerequisites', []), self.name):
            module_file_content += self.prerequisite(x)
        for line in self.process_environment_command(
                filter_environment_blacklist(env, filters)):
            module_file_content += line
        for line in self.module_specific_content(module_configuration):
            module_file_content += line

        # Print a warning in case I am accidentally overwriting
        # a module file that is already there (name clash)
        if not overwrite and os.path.exists(self.file_name):
            message = 'Module file already exists : skipping creation\n'
            message += 'file : {0.file_name}\n'
            message += 'spec : {0.spec}'
            tty.warn(message.format(self))
            return

        # Dump to file
        with open(self.file_name, 'w') as f:
            f.write(module_file_content)
Code example #35
File: stage.py  Project: tvandera/spack
 def cache_local(self):
     tty.debug('Sources for DIY stages are not cached')
Code example #36
File: web.py  Project: MiddelkoopT/spack
    def _spider(url, collect_nested):
        """Fetches URL and any pages it links to.

        Prints out a warning only if the root can't be fetched; it ignores
        errors with pages that the root links to.

        Args:
            url (str): url being fetched and searched for links
            collect_nested (bool): whether we want to collect arguments
                for nested spidering on the links found in this url

        Returns:
            A tuple of:
            - pages: dict of pages visited (URL) mapped to their full text.
            - links: set of links encountered while visiting the pages.
            - spider_args: argument for subsequent call to spider
        """
        pages = {}  # dict from page URL -> text content.
        links = set()  # set of all links seen on visited pages.
        subcalls = []

        try:
            response_url, _, response = read_from_url(url, 'text/html')
            if not response_url or not response:
                return pages, links, subcalls

            page = codecs.getreader('utf-8')(response).read()
            pages[response_url] = page

            # Parse out the links in the page
            link_parser = LinkParser()
            link_parser.feed(page)

            while link_parser.links:
                raw_link = link_parser.links.pop()
                abs_link = url_util.join(response_url,
                                         raw_link.strip(),
                                         resolve_href=True)
                links.add(abs_link)

                # Skip stuff that looks like an archive
                if any(raw_link.endswith(s) for s in ALLOWED_ARCHIVE_TYPES):
                    continue

                # Skip already-visited links
                if abs_link in _visited:
                    continue

                # If we're not at max depth, follow links.
                if collect_nested:
                    subcalls.append((abs_link, ))
                    _visited.add(abs_link)

        except URLError as e:
            tty.debug(str(e))

            if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
                tty.warn("Spack was unable to fetch url list due to a "
                         "certificate verification problem. You can try "
                         "running spack -k, which will not check SSL "
                         "certificates. Use this at your own risk.")

        except HTMLParseError as e:
            # This error indicates that Python's HTML parser sucks.
            msg = "Got an error parsing HTML."

            # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
            if sys.version_info[:3] < (2, 7, 3):
                msg += " Use Python 2.7.3 or newer for better HTML parsing."

            tty.warn(msg, url, "HTMLParseError: " + str(e))

        except Exception as e:
            # Other types of errors are completely ignored,
            # except in debug mode
            tty.debug("Error in _spider: %s:%s" % (type(e), str(e)),
                      traceback.format_exc())

        finally:
            tty.debug("SPIDER: [url={0}]".format(url))

        return pages, links, subcalls
Code example #37
File: web.py  Project: MiddelkoopT/spack
def spider(root_urls, depth=0, concurrency=32):
    """Get web pages from root URLs.

    If depth is specified (e.g., depth=2), then this will also follow
    up to <depth> levels of links from each root.

    Args:
        root_urls (str or list of str): root urls used as a starting point
            for spidering
        depth (int): level of recursion into links
        concurrency (int): number of simultaneous requests that can be sent

    Returns:
        A dict of pages visited (URL) mapped to their full text and the
        set of visited links.
    """
    # Cache of visited links, meant to be captured by the closure below
    _visited = set()

    def _spider(url, collect_nested):
        """Fetches URL and any pages it links to.

        Prints out a warning only if the root can't be fetched; it ignores
        errors with pages that the root links to.

        Args:
            url (str): url being fetched and searched for links
            collect_nested (bool): whether we want to collect arguments
                for nested spidering on the links found in this url

        Returns:
            A tuple of:
            - pages: dict of pages visited (URL) mapped to their full text.
            - links: set of links encountered while visiting the pages.
            - spider_args: argument for subsequent call to spider
        """
        pages = {}  # dict from page URL -> text content.
        links = set()  # set of all links seen on visited pages.
        subcalls = []

        try:
            response_url, _, response = read_from_url(url, 'text/html')
            if not response_url or not response:
                return pages, links, subcalls

            page = codecs.getreader('utf-8')(response).read()
            pages[response_url] = page

            # Parse out the links in the page
            link_parser = LinkParser()
            link_parser.feed(page)

            while link_parser.links:
                raw_link = link_parser.links.pop()
                abs_link = url_util.join(response_url,
                                         raw_link.strip(),
                                         resolve_href=True)
                links.add(abs_link)

                # Skip stuff that looks like an archive
                if any(raw_link.endswith(s) for s in ALLOWED_ARCHIVE_TYPES):
                    continue

                # Skip already-visited links
                if abs_link in _visited:
                    continue

                # If we're not at max depth, follow links.
                if collect_nested:
                    subcalls.append((abs_link, ))
                    _visited.add(abs_link)

        except URLError as e:
            tty.debug(str(e))

            if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
                tty.warn("Spack was unable to fetch url list due to a "
                         "certificate verification problem. You can try "
                         "running spack -k, which will not check SSL "
                         "certificates. Use this at your own risk.")

        except HTMLParseError as e:
            # This error indicates that Python's HTML parser sucks.
            msg = "Got an error parsing HTML."

            # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
            if sys.version_info[:3] < (2, 7, 3):
                msg += " Use Python 2.7.3 or newer for better HTML parsing."

            tty.warn(msg, url, "HTMLParseError: " + str(e))

        except Exception as e:
            # Other types of errors are completely ignored,
            # except in debug mode
            tty.debug("Error in _spider: %s:%s" % (type(e), str(e)),
                      traceback.format_exc())

        finally:
            tty.debug("SPIDER: [url={0}]".format(url))

        return pages, links, subcalls

    # TODO: Needed until we drop support for Python 2.X
    def star(func):
        def _wrapper(args):
            return func(*args)

        return _wrapper

    if isinstance(root_urls, six.string_types):
        root_urls = [root_urls]

    # Clear the local cache of visited pages before starting the search
    _visited.clear()

    current_depth = 0
    pages, links, spider_args = {}, set(), []

    collect = current_depth < depth
    for root in root_urls:
        root = url_util.parse(root)
        spider_args.append((root, collect))

    tp = multiprocessing.pool.ThreadPool(processes=concurrency)
    try:
        while current_depth <= depth:
            tty.debug("SPIDER: [depth={0}, max_depth={1}, urls={2}]".format(
                current_depth, depth, len(spider_args)))
            results = tp.map(star(_spider), spider_args)
            spider_args = []
            collect = current_depth < depth
            for sub_pages, sub_links, sub_spider_args in results:
                sub_spider_args = [x + (collect, ) for x in sub_spider_args]
                pages.update(sub_pages)
                links.update(sub_links)
                spider_args.extend(sub_spider_args)

            current_depth += 1
    finally:
        tp.terminate()
        tp.join()

    return pages, links
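A usage sketch for spider(); the URL is hypothetical:

pages, links = spider('https://example.com/downloads', depth=1)

# pages maps each visited URL to its HTML text; links holds every
# absolute link encountered along the way
tarballs = [link for link in links if link.endswith('.tar.gz')]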
Code example #38
File: release_jobs.py  Project: lorak41/spack
def release_jobs(parser, args):
    share_path = os.path.join(spack_root, 'share', 'spack', 'docker')
    os_container_mapping_path = os.path.join(share_path,
                                             'os-container-mapping.yaml')

    with open(os_container_mapping_path, 'r') as fin:
        os_container_mapping = syaml.load(fin)

    try:
        validate(os_container_mapping, mapping_schema)
    except ValidationError as val_err:
        tty.error('Ill-formed os-container-mapping configuration object')
        tty.error(os_container_mapping)
        tty.debug(val_err)
        return

    containers = os_container_mapping['containers']

    if args.specs:
        # Just print out the spec labels and all dependency edges in
        # a json format.
        spec_list = [Spec(s) for s in args.specs]
        with open(args.specs_deps_output, 'w') as out:
            compute_spec_deps(spec_list, out)
        return

    current_system = sys_type() if args.resolve_deps_locally else None

    release_specs_path = args.spec_set
    if not release_specs_path:
        raise SpackError('Must provide path to release spec-set')

    release_spec_set = CombinatorialSpecSet.from_file(release_specs_path)

    mirror_url = args.mirror_url

    if not mirror_url:
        raise SpackError('Must provide url of target binary mirror')

    cdash_url = args.cdash_url

    spec_labels, dependencies, stages = stage_spec_jobs(
        release_spec_set, containers, current_system)

    if not stages:
        tty.msg('No jobs staged, exiting.')
        return

    if args.print_summary:
        print_staging_summary(spec_labels, dependencies, stages)

    output_object = {}
    job_count = 0

    stage_names = ['stage-{0}'.format(i) for i in range(len(stages))]
    stage = 0

    for stage_jobs in stages:
        stage_name = stage_names[stage]

        for spec_label in stage_jobs:
            release_spec = spec_labels[spec_label]['spec']
            root_spec = spec_labels[spec_label]['rootSpec']

            pkg_compiler = release_spec.compiler
            pkg_hash = release_spec.dag_hash()

            osname = str(release_spec.architecture)
            job_name = get_job_name(release_spec, osname)
            container_info = containers[osname]
            build_image = container_info['image']

            job_scripts = ['./bin/rebuild-package.sh']

            if 'setup_script' in container_info:
                job_scripts.insert(
                    0, container_info['setup_script'] % pkg_compiler)

            job_dependencies = []
            if spec_label in dependencies:
                job_dependencies = ([
                    get_job_name(spec_labels[dep_label]['spec'], osname)
                    for dep_label in dependencies[spec_label]
                ])

            job_object = {
                'stage': stage_name,
                'variables': {
                    'MIRROR_URL': mirror_url,
                    'CDASH_BASE_URL': cdash_url,
                    'HASH': pkg_hash,
                    'DEPENDENCIES': ';'.join(job_dependencies),
                    'ROOT_SPEC': str(root_spec),
                },
                'script': job_scripts,
                'image': build_image,
                'artifacts': {
                    'paths': [
                        'local_mirror/build_cache',
                        'jobs_scratch_dir',
                        'cdash_report',
                    ],
                    'when':
                    'always',
                },
                'dependencies': job_dependencies,
            }

            # If we see 'compilers' in the container information, it's a
            # filter for the compilers this container can handle, else we
            # assume it can handle any compiler
            if 'compilers' in container_info:
                do_job = False
                for item in container_info['compilers']:
                    container_compiler_spec = CompilerSpec(item['name'])
                    if pkg_compiler == container_compiler_spec:
                        do_job = True
            else:
                do_job = True

            if args.shared_runner_tag:
                job_object['tags'] = [args.shared_runner_tag]

            if args.signing_key:
                job_object['variables']['SIGN_KEY_HASH'] = args.signing_key

            if do_job:
                output_object[job_name] = job_object
                job_count += 1

    tty.msg('{0} build jobs generated in {1} stages'.format(
        job_count, len(stages)))

    final_stage = 'stage-rebuild-index'

    final_job = {
        'stage': final_stage,
        'variables': {
            'MIRROR_URL': mirror_url,
        },
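        # NOTE: build_image still holds the image assigned to the last job
        # generated in the loop above; the index job simply reuses it.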
        'image': build_image,
        'script': './bin/rebuild-index.sh',
    }

    if args.shared_runner_tag:
        final_job['tags'] = [args.shared_runner_tag]

    output_object['rebuild-index'] = final_job
    stage_names.append(final_stage)
    output_object['stages'] = stage_names

    with open(args.output_file, 'w') as outf:
        outf.write(syaml.dump(output_object))
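
For reference, here is a minimal sketch of the dictionary this function dumps to YAML for a single-spec, single-stage pipeline; every value below (job name, URLs, hash, image tag) is a hypothetical placeholder, not real data:

# Hypothetical shape of output_object for one generated job plus the
# final index-rebuild job.
example_output = {
    'zlib 1.2.11 abc1234 ubuntu18.04': {
        'stage': 'stage-0',
        'variables': {
            'MIRROR_URL': 'https://mirror.example.com',
            'CDASH_BASE_URL': 'https://cdash.example.com',
            'HASH': 'abc1234',
            'DEPENDENCIES': '',
            'ROOT_SPEC': 'zlib@1.2.11',
        },
        'script': ['./bin/rebuild-package.sh'],
        'image': 'example/builder:latest',
        'artifacts': {
            'paths': ['local_mirror/build_cache',
                      'jobs_scratch_dir',
                      'cdash_report'],
            'when': 'always',
        },
        'dependencies': [],
    },
    'rebuild-index': {
        'stage': 'stage-rebuild-index',
        'variables': {'MIRROR_URL': 'https://mirror.example.com'},
        'image': 'example/builder:latest',
        'script': './bin/rebuild-index.sh',
    },
    'stages': ['stage-0', 'stage-rebuild-index'],
}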
コード例 #39
ファイル: install.py プロジェクト: lsuhpchelp/lsuhpcspack
def install(parser, args, **kwargs):
    if args.help_cdash:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog=textwrap.dedent('''\
environment variables:
  SPACK_CDASH_AUTH_TOKEN
                        authentication token to present to CDash
                        '''))
        add_cdash_args(parser, True)
        parser.print_help()
        return

    if not args.spec and not args.specfiles:
        # if there are no args but an active environment or spack.yaml file
        # then install the packages from it.
        env = ev.get_env(args, 'install')
        if env:
            if not args.only_concrete:
                with env.write_transaction():
                    concretized_specs = env.concretize()
                    ev.display_specs(concretized_specs)

                    # save view regeneration for later, so that we only do it
                    # once, as it can be slow.
                    env.write(regenerate_views=False)

            tty.msg("Installing environment %s" % env.name)
            env.install_all(args)
            with env.write_transaction():
                # It is not strictly required to synchronize view regeneration
                # but doing so can prevent redundant work in the filesystem.
                env.regenerate_views()
            return
        else:
            tty.die("install requires a package argument or a spack.yaml file")

    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')

    # Parse cli arguments and construct a dictionary
    # that will be passed to Package.do_install API
    update_kwargs_from_args(args, kwargs)

    if args.run_tests:
        tty.warn("Deprecated option: --run-tests: use --test=all instead")

    # 1. Abstract specs from cli
    reporter = spack.report.collect_info(args.log_format, args)
    if args.log_file:
        reporter.filename = args.log_file

    abstract_specs = spack.cmd.parse_specs(args.spec)
    tests = False
    if args.test == 'all' or args.run_tests:
        tests = True
    elif args.test == 'root':
        tests = [spec.name for spec in abstract_specs]
    kwargs['tests'] = tests

    try:
        specs = spack.cmd.parse_specs(args.spec, concretize=True, tests=tests)
    except SpackError as e:
        tty.debug(e)
        reporter.concretization_report(e.message)
        raise

    # 2. Concrete specs from yaml files
    for file in args.specfiles:
        with open(file, 'r') as f:
            s = spack.spec.Spec.from_yaml(f)

        if s.concretized().dag_hash() != s.dag_hash():
            msg = 'skipped invalid file "{0}". '
            msg += 'The file does not contain a concrete spec.'
            tty.warn(msg.format(file))
            continue

        abstract_specs.append(s)
        specs.append(s.concretized())

    if len(specs) == 0:
        tty.die('The `spack install` command requires a spec to install.')

    if not args.log_file and not reporter.filename:
        reporter.filename = default_log_file(specs[0])
    reporter.specs = specs
    with reporter:
        if args.overwrite:

            installed = list(
                filter(lambda x: x, map(spack.store.db.query_one, specs)))
            if not args.yes_to_all:
                display_args = {
                    'long': True,
                    'show_flags': True,
                    'variants': True
                }

                if installed:
                    tty.msg('The following package specs will be '
                            'reinstalled:\n')
                    spack.cmd.display_specs(installed, **display_args)

                not_installed = list(
                    filter(lambda x: x not in installed, specs))
                if not_installed:
                    tty.msg('The following package specs are not installed '
                            'and the --overwrite flag was given. They will '
                            'be newly installed:\n')
                    spack.cmd.display_specs(not_installed, **display_args)

                # We have some specs, so one of the above must have been true
                answer = tty.get_yes_or_no('Do you want to proceed?',
                                           default=False)
                if not answer:
                    tty.die('Reinstallation aborted.')

            for abstract, concrete in zip(abstract_specs, specs):
                if concrete in installed:
                    with fs.replace_directory_transaction(concrete.prefix):
                        install_spec(args, kwargs, abstract, concrete)
                else:
                    install_spec(args, kwargs, abstract, concrete)

        else:
            for abstract, concrete in zip(abstract_specs, specs):
                install_spec(args, kwargs, abstract, concrete)
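
The concreteness check used above for spec files can be exercised on its own. A minimal sketch, assuming a Spack checkout on sys.path; the file path is a hypothetical placeholder:

import spack.spec

# Hypothetical path to a spec file previously exported by Spack
with open('/tmp/zlib.spec.yaml', 'r') as f:
    s = spack.spec.Spec.from_yaml(f)

# A spec file is only installable as-is when concretization is a no-op,
# i.e. it does not change the spec's dag_hash.
if s.concretized().dag_hash() == s.dag_hash():
    print('file contains a concrete spec')
else:
    print('file contains an abstract spec; concretize it first')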
コード例 #40
ファイル: buildcache.py プロジェクト: tomdele/spack
def _createtarball(env,
                   spec_yaml=None,
                   packages=None,
                   add_spec=True,
                   add_deps=True,
                   output_location=os.getcwd(),
                   signing_key=None,
                   force=False,
                   make_relative=False,
                   unsigned=False,
                   allow_root=False,
                   rebuild_index=False):
    if spec_yaml:
        with open(spec_yaml, 'r') as fd:
            yaml_text = fd.read()
            tty.debug('createtarball read spec yaml:')
            tty.debug(yaml_text)
            s = Spec.from_yaml(yaml_text)
            package = '/{0}'.format(s.dag_hash())
            matches = find_matching_specs(package, env=env)

    elif packages:
        matches = find_matching_specs(packages, env=env)

    elif env:
        matches = [env.specs_by_hash[h] for h in env.concretized_order]

    else:
        tty.die("build cache file creation requires at least one" +
                " installed package spec, an active environment," +
                " or else a path to a yaml file containing a spec" +
                " to install")
    specs = set()

    mirror = spack.mirror.MirrorCollection().lookup(output_location)
    outdir = url_util.format(mirror.push_url)

    msg = 'Buildcache files will be output to %s/build_cache' % outdir
    tty.msg(msg)

    if matches:
        tty.debug('Found at least one matching spec')

    for match in matches:
        tty.debug('examining match {0}'.format(match.format()))
        if match.external or match.virtual:
            tty.debug('skipping external or virtual spec %s' % match.format())
        else:
            lookup = spack.store.db.query_one(match)

            if not add_spec:
                tty.debug('skipping matching root spec %s' % match.format())
            elif lookup is None:
                tty.debug('skipping uninstalled matching spec %s' %
                          match.format())
            else:
                tty.debug('adding matching spec %s' % match.format())
                specs.add(match)

            if not add_deps:
                continue

            tty.debug('recursing dependencies')
            for d, node in match.traverse(order='post',
                                          depth=True,
                                          deptype=('link', 'run')):
                # skip root, since it's handled above
                if d == 0:
                    continue

                lookup = spack.store.db.query_one(node)

                if node.external or node.virtual:
                    tty.debug('skipping external or virtual dependency %s' %
                              node.format())
                elif lookup is None:
                    tty.debug('skipping uninstalled dependency %s' %
                              node.format())
                else:
                    tty.debug('adding dependency %s' % node.format())
                    specs.add(node)

    tty.debug('writing tarballs to %s/build_cache' % outdir)

    for spec in specs:
        tty.debug('creating binary cache file for package %s ' % spec.format())
        try:
            bindist.build_tarball(spec, outdir, force, make_relative, unsigned,
                                  allow_root, signing_key, rebuild_index)
        except bindist.NoOverwriteException as e:
            tty.warn(e)
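
A hedged usage sketch of the helper above; _createtarball is module-private and is normally driven by `spack buildcache create`, and the package name and mirror path here are hypothetical:

# Build unsigned tarballs for a hypothetical installed package and its
# link/run dependencies, then refresh the mirror index.
_createtarball(env=None,
               packages=['zlib'],
               output_location='/tmp/my-mirror',
               unsigned=True,
               rebuild_index=True)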
コード例 #41
ファイル: build_environment.py プロジェクト: timkphd/spack
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings
    """
    # Gather information about various types of dependencies
    build_deps      = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps       = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps      = get_rpath_deps(pkg)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)

    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))

        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)

        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)

        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))

    build_prefixes      = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]

    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)

    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes      = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)

    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)

    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    environment = compiler.environment

    for command, variable in iteritems(environment):
        if command == 'set':
            for name, value in iteritems(variable):
                env.set(name, value)
        elif command == 'unset':
            for name, _ in iteritems(variable):
                env.unset(name)
        elif command == 'prepend-path':
            for name, value in iteritems(variable):
                env.prepend_path(name, value)
        elif command == 'append-path':
            for name, value in iteritems(variable):
                env.append_path(name, value)

    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)

    implicit_rpaths = compiler.implicit_rpaths()
    if implicit_rpaths:
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS', ':'.join(implicit_rpaths))

    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)

    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks.  The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory.  Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, pkg.compiler.name)
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)

    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)

    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
    env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
    env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
    env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)

    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)

    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)

    return env
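
The set/unset/prepend-path/append-path dispatch above consumes a per-compiler environment description. A minimal sketch of a dictionary that loop would accept; all names and values are hypothetical:

# Hypothetical compiler 'environment' mapping, keyed by command, as
# iterated by the loop above.
environment = {
    'set': {'MODULEPATH': '/opt/modules'},
    'unset': {'CPATH': None},          # the value is ignored for 'unset'
    'prepend-path': {'PATH': '/opt/compiler/bin'},
    'append-path': {'LD_LIBRARY_PATH': '/opt/compiler/lib'},
}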
コード例 #42
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
    if not spec.concrete:
        raise ValueError('spec must be concrete to check against mirror')

    pkg_name = spec.name
    pkg_version = spec.version

    pkg_hash = spec.dag_hash()
    pkg_full_hash = spec.full_hash()

    tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
        pkg_name, pkg_version, pkg_hash, pkg_full_hash))
    tty.debug(spec.tree())

    # Try to retrieve the .spec.yaml directly, based on the known
    # format of the name, in order to determine if the package
    # needs to be rebuilt.
    cache_prefix = build_cache_prefix(mirror_url)
    spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
    file_path = os.path.join(cache_prefix, spec_yaml_file_name)

    result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
        spec.short_spec, '' if rebuild_on_errors else 'not ')

    try:
        _, _, yaml_file = web_util.read_from_url(file_path)
        yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
    except (URLError, web_util.SpackWebError) as url_err:
        err_msg = [
            'Unable to determine whether {0} needs rebuilding,',
            ' caught exception attempting to read from {1}.',
        ]
        tty.error(''.join(err_msg).format(spec.short_spec, file_path))
        tty.debug(url_err)
        tty.warn(result_of_error)
        return rebuild_on_errors

    if not yaml_contents:
        tty.error('Reading {0} returned nothing'.format(file_path))
        tty.warn(result_of_error)
        return rebuild_on_errors

    spec_yaml = syaml.load(yaml_contents)

    # If either the full_hash didn't exist in the .spec.yaml file, or it
    # did, but didn't match the one we computed locally, then we should
    # just rebuild.  This can be simplified once the dag_hash and the
    # full_hash become the same thing.
    if ('full_hash' not in spec_yaml or
            spec_yaml['full_hash'] != pkg_full_hash):
        if 'full_hash' in spec_yaml:
            reason = 'hash mismatch, remote = {0}, local = {1}'.format(
                spec_yaml['full_hash'], pkg_full_hash)
        else:
            reason = 'full_hash was missing from remote spec.yaml'
        tty.msg('Rebuilding {0}, reason: {1}'.format(
            spec.short_spec, reason))
        tty.msg(spec.tree())
        return True

    return False
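
A hedged usage sketch; `spec` is assumed to be a concrete Spec already in scope, and the mirror URL is a placeholder:

# True means the mirror's .spec.yaml lacks a full_hash or records one
# that differs from the locally computed value. With
# rebuild_on_errors=True, read failures also count as "needs rebuild".
if needs_rebuild(spec, 'https://mirror.example.com',
                 rebuild_on_errors=True):
    print('spec is stale on the mirror; schedule a rebuild')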
コード例 #43
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
                    force=False):
    """
    extract binary tarball for given package into install area
    """
    if os.path.exists(spec.prefix):
        if force:
            shutil.rmtree(spec.prefix)
        else:
            raise NoOverwriteException(str(spec.prefix))

    tmpdir = tempfile.mkdtemp()
    stagepath = os.path.dirname(filename)
    spackfile_name = tarball_name(spec, '.spack')
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)
    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
                suppress = config.get('config:suppress_gpg_warnings', False)
                Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
            except Exception as e:
                shutil.rmtree(tmpdir)
                raise e
        else:
            shutil.rmtree(tmpdir)
            raise NoVerifyException(
                "Package spec file failed signature verification.\n"
                "Use spack buildcache keys to download "
                "and install a key for verification from the mirror.")
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # get the sha256 checksum recorded at creation
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
    if bchecksum['hash'] != checksum:
        shutil.rmtree(tmpdir)
        raise NoChecksumException(
            "Package tarball failed checksum verification.\n"
            "It cannot be installed.")

    new_relative_prefix = str(os.path.relpath(spec.prefix,
                                              spack.store.layout.root))
    # if the original relative prefix is in the spec file use it
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    rel = buildinfo.get('relative_rpaths')
    # if the original relative prefix and new relative prefix differ the
    # directory layout has changed and the buildcache cannot be installed
    # if it was created with relative rpaths
    info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
    tty.debug(info %
              (old_relative_prefix, new_relative_prefix, rel))
#    if (old_relative_prefix != new_relative_prefix and (rel)):
#        shutil.rmtree(tmpdir)
#        msg = "Package tarball was created from an install "
#        msg += "prefix with a different directory layout. "
#        msg += "It cannot be relocated because it "
#        msg += "uses relative rpaths."
#        raise NewLayoutException(msg)

    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        tar.extractall(path=tmpdir)
    # get the parent directory of the file .spack/binary_distribution;
    # this should be the directory unpacked from the tarball, whose
    # name is unknown because the prefix naming is unknown
    bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
    workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
    tty.debug('workdir %s' % workdir)
    # install_tree copies hardlinks as separate files, so instead
    # create a temporary tarfile from the prefix and extract it to
    # workdir; tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % workdir,
                arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(spec.prefix)
    os.remove(temp_tarfile_path)

    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)

    try:
        relocate_package(spec, allow_root)
    except Exception as e:
        shutil.rmtree(spec.prefix)
        raise e
    else:
        manifest_file = os.path.join(spec.prefix,
                                     spack.store.layout.metadata_dir,
                                     spack.store.layout.manifest_file_name)
        if not os.path.exists(manifest_file):
            spec_id = spec.format('{name}/{hash:7}')
            tty.warn('No manifest file in tarball for spec %s' % spec_id)
    finally:
        shutil.rmtree(tmpdir)
        if os.path.exists(filename):
            os.remove(filename)
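
A hedged usage sketch; `spec` is assumed to be a concrete, not-yet-installed Spec, and the download path is hypothetical:

# Extract a previously downloaded .spack archive into spec.prefix,
# verifying its signature and checksum along the way.
extract_tarball(spec,
                '/tmp/downloads/zlib-1.2.11.spack',
                allow_root=False,
                unsigned=False,  # require GPG verification
                force=False)     # raise instead of overwriting the prefix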
コード例 #44
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
                  allow_root=False, key=None, regenerate_index=False):
    """
    Build a tarball from given spec and put it into the directory structure
    used at the mirror (following <tarball_directory_name>).
    """
    if not spec.concrete:
        raise ValueError('spec must be concrete to build tarball')

    # set up some paths
    tmpdir = tempfile.mkdtemp()
    cache_prefix = build_cache_prefix(tmpdir)

    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
    tarfile_path = os.path.join(tarfile_dir, tarfile_name)
    spackfile_path = os.path.join(
        cache_prefix, tarball_path_name(spec, '.spack'))

    remote_spackfile_path = url_util.join(
        outdir, os.path.relpath(spackfile_path, tmpdir))

    mkdirp(tarfile_dir)
    if web_util.url_exists(remote_spackfile_path):
        if force:
            web_util.remove_url(remote_spackfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_spackfile_path))

    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))

    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))

    if web_util.url_exists(remote_specfile_path):
        if force:
            web_util.remove_url(remote_specfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))

    # make a copy of the install directory to work with
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
    # install_tree copies hardlinks as separate files, so instead
    # create a temporary tarfile from the prefix and extract it to
    # workdir; tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % spec.prefix,
                arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(workdir)
    os.remove(temp_tarfile_path)

    # create info for later relocation and create tar
    write_buildinfo_file(spec, workdir, rel)

    # optionally make the paths in the binaries relative to each other
    # in the spack install tree before creating tarball
    if rel:
        try:
            make_package_relative(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)
    else:
        try:
            check_package_relocatable(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)

    # create gzip compressed tarball of the install prefix
    with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
        tar.add(name='%s' % workdir,
                arcname='%s' % os.path.basename(spec.prefix))
    # remove copy of install directory
    shutil.rmtree(workdir)

    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.yaml
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = yaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
        spec.prefix, spack.store.layout.root)
    buildinfo['relative_rpaths'] = rel
    spec_dict['buildinfo'] = buildinfo
    spec_dict['full_hash'] = spec.full_hash()

    tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
        spec_dict['full_hash'],
        spec.name,
        url_util.format(remote_specfile_path)))
    tty.debug(spec.tree())

    with open(specfile_path, 'w') as outfile:
        outfile.write(syaml.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
        sign_tarball(key, force, specfile_path)
    # put tarball, spec and signature files in .spack archive
    with closing(tarfile.open(spackfile_path, 'w')) as tar:
        tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
        tar.add(name=specfile_path, arcname='%s' % specfile_name)
        if not unsigned:
            tar.add(name='%s.asc' % specfile_path,
                    arcname='%s.asc' % specfile_name)

    # cleanup file moved to archive
    os.remove(tarfile_path)
    if not unsigned:
        os.remove('%s.asc' % specfile_path)

    web_util.push_to_url(
        spackfile_path, remote_spackfile_path, keep_original=False)
    web_util.push_to_url(
        specfile_path, remote_specfile_path, keep_original=False)

    tty.debug('Buildcache for "{0}" written to \n {1}'
              .format(spec, remote_spackfile_path))

    try:
        # create an index.html for the build_cache directory so specs can be
        # found
        if regenerate_index:
            generate_package_index(url_util.join(
                outdir, os.path.relpath(cache_prefix, tmpdir)))
    finally:
        shutil.rmtree(tmpdir)

    return None
コード例 #45
ファイル: release_jobs.py プロジェクト: lorak41/spack
def get_deps_using_container(specs, image):
    image_home_dir = '/home/spackuser'
    repo_mount_location = '{0}/spack'.format(image_home_dir)
    temp_dir = tempfile.mkdtemp(dir='/tmp')

    # The paths this module will see (from outside the container)
    temp_file = os.path.join(temp_dir, 'spec_deps.json')
    temp_err = os.path.join(temp_dir, 'std_err.log')

    # The paths the bash_command will see inside the container
    json_output = '/work/spec_deps.json'
    std_error = '/work/std_err.log'

    specs_arg = ' '.join([str(spec) for spec in specs])

    bash_command = " ".join([
        "source {0}/share/spack/setup-env.sh ;", "spack release-jobs",
        "--specs-deps-output {1}", "{2}", "2> {3}"
    ]).format(repo_mount_location, json_output, specs_arg, std_error)

    docker_cmd_to_run = [
        'docker',
        'run',
        '--rm',
        '-v',
        '{0}:{1}'.format(spack_root, repo_mount_location),
        '-v',
        '{0}:{1}'.format(temp_dir, '/work'),
        '--entrypoint',
        'bash',
        '-t',
        str(image),
        '-c',
        bash_command,
    ]

    tty.debug('Running subprocess command:')
    tty.debug(' '.join(docker_cmd_to_run))

    # Docker is going to merge the stdout/stderr from the script and write it
    # all to the stdout of the running container.  For this reason, we won't
    # pipe any stdout/stderr from the docker command, but rather write the
    # output we care about to a file in a mounted directory.  Similarly, any
    # errors from running the spack command inside the container are redirected
    # to another file in the mounted directory.
    proc = subprocess.Popen(docker_cmd_to_run)
    proc.wait()

    # Check for errors from spack command
    if os.path.exists(temp_err) and os.path.getsize(temp_err) > 0:
        # Spack wrote something to stderr inside the container.  We will
        # print out whatever it is, but attempt to carry on with the process.
        tty.error('Encountered spack error running command in container:')
        with open(temp_err, 'r') as err:
            tty.error(err.read())

    spec_deps_obj = {}

    try:
        # Finally, try to read/parse the output we really care about: the
        # specs and dependency edges for the provided spec, as it was
        # concretized in the appropriate container.
        with open(temp_file, 'r') as fd:
            spec_deps_obj = json.loads(fd.read())

    except ValueError as val_err:
        tty.error('Failed to read json object from spec-deps output file:')
        tty.error(str(val_err))
    except IOError as io_err:
        tty.error('Problem reading from spec-deps json output file:')
        tty.error(str(io_err))
    finally:
        shutil.rmtree(temp_dir)

    return spec_deps_obj
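
A hedged usage sketch; the image tag is hypothetical, and a Docker daemon with that image available is assumed:

import spack.spec

specs = [spack.spec.Spec('zlib'), spack.spec.Spec('bzip2')]

# Concretize the specs inside the container and collect the
# specs/dependency-edges JSON written to the mounted work directory
# (an empty dict comes back if the file could not be read).
spec_deps = get_deps_using_container(specs, 'example/spack-builder:latest')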
コード例 #46
ファイル: stage.py プロジェクト: tvandera/spack
    def fetch(self, *args, **kwargs):
        tty.debug('No need to fetch for DIY.')
コード例 #47
ファイル: web.py プロジェクト: wdmapp/spack
def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            context = ssl._create_unverified_context()

    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = resp.headers.get('Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"

    try:
        response = _urlopen(req, timeout=_timeout, context=context)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(
            ERROR=str(err)))

    if accept_content_type and not is_web_url:
        content_type = response.headers.get('Content-type')

    reject_content_type = (
        accept_content_type and (
            content_type is None or
            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response
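
A hedged usage sketch; the URL is a placeholder. When the content type is rejected the function returns (None, None, None), so the response must be checked before reading:

import codecs

url, headers, response = read_from_url('https://example.com/downloads/',
                                       accept_content_type='text/html')
if response is not None:
    page_text = codecs.getreader('utf-8')(response).read()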
コード例 #48
    def expand(self):
        if not self.expand_archive:
            tty.msg("Staging unexpanded archive %s in %s" % (
                    self.archive_file, self.stage.source_path))
            if not self.stage.expanded:
                mkdirp(self.stage.source_path)
            dest = os.path.join(self.stage.source_path,
                                os.path.basename(self.archive_file))
            shutil.move(self.archive_file, dest)
            return

        tty.msg("Staging archive: %s" % self.archive_file)

        if not self.archive_file:
            raise NoArchiveFileError(
                "Couldn't find archive file",
                "Failed on expand() for URL %s" % self.url)

        if not self.extension:
            self.extension = extension(self.archive_file)

        if self.stage.expanded:
            tty.debug('Source already staged to %s' % self.stage.source_path)
            return

        decompress = decompressor_for(self.archive_file, self.extension)

        # Expand all tarballs in their own directory to contain
        # exploding tarballs.
        tarball_container = os.path.join(self.stage.path,
                                         "spack-expanded-archive")

        mkdirp(tarball_container)
        with working_dir(tarball_container):
            decompress(self.archive_file)

        # Check for an exploding tarball, i.e. one that doesn't expand to
        # a single directory.  If the tarball *didn't* explode, move its
        # contents to the staging source directory & remove the container
        # directory.  If the tarball did explode, just rename the tarball
        # directory to the staging source directory.
        #
        # NOTE: The tar program on Mac OS X will encode HFS metadata in
        # hidden files, which can end up *alongside* a single top-level
        # directory.  We initially ignore presence of hidden files to
        # accommodate these "semi-exploding" tarballs but ensure the files
        # are copied to the source directory.
        files = os.listdir(tarball_container)
        non_hidden = [f for f in files if not f.startswith('.')]
        if len(non_hidden) == 1:
            src = os.path.join(tarball_container, non_hidden[0])
            if os.path.isdir(src):
                self.stage.srcdir = non_hidden[0]
                shutil.move(src, self.stage.source_path)
                if len(files) > 1:
                    files.remove(non_hidden[0])
                    for f in files:
                        src = os.path.join(tarball_container, f)
                        dest = os.path.join(self.stage.path, f)
                        shutil.move(src, dest)
                os.rmdir(tarball_container)
            else:
                # This is a non-directory entry (e.g., a patch file) so simply
                # rename the tarball container to be the source path.
                shutil.move(tarball_container, self.stage.source_path)

        else:
            shutil.move(tarball_container, self.stage.source_path)
コード例 #49
def _get_external_packages(packages_to_check, system_path_to_exe=None):
    if not system_path_to_exe:
        system_path_to_exe = _get_system_executables()

    exe_pattern_to_pkgs = defaultdict(list)
    for pkg in packages_to_check:
        if hasattr(pkg, 'executables'):
            for exe in pkg.executables:
                exe_pattern_to_pkgs[exe].append(pkg)

    pkg_to_found_exes = defaultdict(set)
    for exe_pattern, pkgs in exe_pattern_to_pkgs.items():
        compiled_re = re.compile(exe_pattern)
        for path, exe in system_path_to_exe.items():
            if compiled_re.search(exe):
                for pkg in pkgs:
                    pkg_to_found_exes[pkg].add(path)

    pkg_to_entries = defaultdict(list)
    resolved_specs = {}  # spec -> exe found for the spec

    for pkg, exes in pkg_to_found_exes.items():
        if not hasattr(pkg, 'determine_spec_details'):
            tty.warn("{0} must define 'determine_spec_details' in order"
                     " for Spack to detect externally-provided instances"
                     " of the package.".format(pkg.name))
            continue

        # TODO: iterate through this in a predetermined order (e.g. by package
        # name) to get repeatable results when there are conflicts. Note that
        # if we take the prefixes returned by _group_by_prefix, then consider
        # them in the order that they appear in PATH, this should be sufficient
        # to get repeatable results.
        for prefix, exes_in_prefix in _group_by_prefix(exes):
            # TODO: multiple instances of a package can live in the same
            # prefix, and a package implementation can return multiple specs
            # for one prefix, but without additional details (e.g. about the
            # naming scheme which differentiates them), the spec won't be
            # usable.
            specs = _convert_to_iterable(
                pkg.determine_spec_details(prefix, exes_in_prefix))

            if not specs:
                tty.debug(
                    'The following executables in {0} were decidedly not '
                    'part of the package {1}: {2}'.format(
                        prefix, pkg.name,
                        ', '.join(_convert_to_iterable(exes_in_prefix))))

            for spec in specs:
                pkg_prefix = _determine_base_dir(prefix)

                if not pkg_prefix:
                    tty.debug(
                        "{0} does not end with a 'bin/' directory: it"
                        " cannot be added as a Spack package".format(prefix))
                    continue

                if spec in resolved_specs:
                    prior_prefix = ', '.join(
                        _convert_to_iterable(resolved_specs[spec]))

                    tty.debug("Executables in {0} and {1} are both associated"
                              " with the same spec {2}".format(
                                  prefix, prior_prefix, str(spec)))
                    continue
                else:
                    resolved_specs[spec] = prefix

                try:
                    spec.validate_detection()
                except Exception as e:
                    msg = ('"{0}" has been detected on the system but will '
                           'not be added to packages.yaml [reason={1}]')
                    tty.warn(msg.format(spec, str(e)))
                    continue

                if spec.external_path:
                    pkg_prefix = spec.external_path

                pkg_to_entries[pkg.name].append(
                    ExternalPackageEntry(spec=spec, base_dir=pkg_prefix))

    return pkg_to_entries
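
For this detection to find anything, a package class needs an `executables` attribute and a `determine_spec_details` hook, as checked above. A minimal sketch of such a class; the class, pattern, and returned spec are all hypothetical:

import spack.spec


class Foo(object):
    # Regular expressions matched against executables found on PATH
    executables = [r'^foo$']

    @classmethod
    def determine_spec_details(cls, prefix, exes_in_prefix):
        # Hypothetical: a real package would run an executable or inspect
        # the prefix to determine the version; return None (or an empty
        # list) if the installation cannot be identified.
        return spack.spec.Spec('foo@1.0')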
コード例 #50
    def expand(self):
        tty.debug(
            "Source fetched with %s is already expanded." % self.url_attr)
コード例 #51
def _spider(url, visited, root, depth, max_depth, raise_on_error):
    """Fetches URL and any pages it links to up to max_depth.

       depth should initially be zero, and max_depth is the max depth of
       links to follow from the root.

       Prints out a warning only if the root can't be fetched; it ignores
       errors with pages that the root links to.

       Returns a tuple of:
       - pages: dict of pages visited (URL) mapped to their full text.
       - links: set of links encountered while visiting the pages.
    """
    pages = {}  # dict from page URL -> text content.
    links = set()  # set of all links seen on visited pages.

    try:
        response_url, _, response = read_from_url(url, 'text/html')
        if not response_url or not response:
            return pages, links

        page = codecs.getreader('utf-8')(response).read()
        pages[response_url] = page

        # Parse out the links in the page
        link_parser = LinkParser()
        subcalls = []
        link_parser.feed(page)

        while link_parser.links:
            raw_link = link_parser.links.pop()
            abs_link = url_util.join(response_url,
                                     raw_link.strip(),
                                     resolve_href=True)
            links.add(abs_link)

            # Skip stuff that looks like an archive
            if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
                continue

            # Skip things outside the root directory
            if not abs_link.startswith(root):
                continue

            # Skip already-visited links
            if abs_link in visited:
                continue

            # If we're not at max depth, follow links.
            if depth < max_depth:
                subcalls.append((abs_link, visited, root, depth + 1, max_depth,
                                 raise_on_error))
                visited.add(abs_link)

        if subcalls:
            pool = NonDaemonPool(processes=len(subcalls))
            try:
                results = pool.map(_spider_wrapper, subcalls)

                for sub_pages, sub_links in results:
                    pages.update(sub_pages)
                    links.update(sub_links)

            finally:
                pool.terminate()
                pool.join()

    except URLError as e:
        tty.debug(e)

        if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
            tty.warn("Spack was unable to fetch url list due to a certificate "
                     "verification problem. You can try running spack -k, "
                     "which will not check SSL certificates. Use this at your "
                     "own risk.")

        if raise_on_error:
            raise NoNetworkConnectionError(str(e), url)

    except HTMLParseError as e:
        # This error indicates that Python's HTML parser sucks.
        msg = "Got an error parsing HTML."

        # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
        if sys.version_info[:3] < (2, 7, 3):
            msg += " Use Python 2.7.3 or newer for better HTML parsing."

        tty.warn(msg, url, "HTMLParseError: " + str(e))

    except Exception as e:
        # Other types of errors are completely ignored, except in debug mode.
        tty.debug("Error in _spider: %s:%s" % (type(e), e),
                  traceback.format_exc())

    return pages, links
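
A hedged usage sketch; the root URL is a placeholder. Per the docstring, the initial depth is zero and the visited set starts empty:

root = 'https://example.com/pub/'
pages, links = _spider(root, visited=set(), root=root, depth=0,
                       max_depth=2, raise_on_error=False)
# 'pages' maps fetched URLs to their HTML text; 'links' collects every
# link seen, including ones that were not followed.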
コード例 #52
ファイル: stage.py プロジェクト: vmiheer/spack
    def __init__(self,
                 url_or_fetch_strategy,
                 name=None,
                 mirror_paths=None,
                 keep=False,
                 path=None,
                 lock=True,
                 search_fn=None):
        """Create a stage object.
           Parameters:
             url_or_fetch_strategy
                 URL of the archive to be downloaded into this stage, OR
                 a valid FetchStrategy.

             name
                 If a name is provided, then this stage is a named stage
                 and will persist between runs (or if you construct another
                 stage object later).  If name is not provided, then this
                 stage will be given a unique name automatically.

             mirror_paths
                 If provided, Stage will search Spack's mirrors for
                 this archive at each of the provided relative mirror paths
                 before using the default fetch strategy.

             keep
                 By default, when used as a context manager, the Stage
                 is deleted on exit when no exceptions are raised.
                 Pass True to keep the stage intact even if no
                 exceptions are raised.

             path
                 If provided, the stage path to use for associated builds.

             lock
                 True if the stage directory file lock is to be used, False
                 otherwise.

             search_fn
                 The search function that provides the fetch strategy
                 instance.
        """
        # TODO: fetch/stage coupling needs to be reworked -- the logic
        # TODO: here is convoluted and not modular enough.
        if isinstance(url_or_fetch_strategy, string_types):
            self.fetcher = fs.from_url_scheme(url_or_fetch_strategy)
        elif isinstance(url_or_fetch_strategy, fs.FetchStrategy):
            self.fetcher = url_or_fetch_strategy
        else:
            raise ValueError(
                "Can't construct Stage without url or fetch strategy")
        self.fetcher.stage = self
        # self.fetcher can change with mirrors.
        self.default_fetcher = self.fetcher
        self.search_fn = search_fn
        # used for mirrored archives of repositories.
        self.skip_checksum_for_mirror = True

        self.srcdir = None

        # TODO: This uses a protected member of tempfile, but seemed the only
        # TODO: way to get a temporary name.  It won't be the same as the
        # TODO: temporary stage area in _stage_root.
        self.name = name
        if name is None:
            self.name = stage_prefix + next(tempfile._get_candidate_names())
        self.mirror_paths = mirror_paths

        # Use the provided path or construct an optionally named stage path.
        if path is not None:
            self.path = path
        else:
            self.path = os.path.join(get_stage_root(), self.name)

        # Flag to decide whether to delete the stage folder on exit or not
        self.keep = keep

        # File lock for the stage directory.  We use one file for all
        # stage locks. See spack.database.Database.prefix_lock for
        # details on this approach.
        self._lock = None
        if lock:
            if self.name not in Stage.stage_locks:
                sha1 = hashlib.sha1(self.name.encode('utf-8')).digest()
                lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
                stage_lock_path = os.path.join(get_stage_root(), '.lock')

                tty.debug("Creating stage lock {0}".format(self.name))
                Stage.stage_locks[self.name] = spack.util.lock.Lock(
                    stage_lock_path, lock_id, 1, desc=self.name)

            self._lock = Stage.stage_locks[self.name]

        # When stages are reused, we need to know whether to re-create
        # it.  This marks whether it has been created/destroyed.
        self.created = False
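
A hedged usage sketch; the archive URL is a placeholder. Used as a context manager, the stage is deleted on exit unless keep=True is passed:

with Stage('https://example.com/foo-1.0.tar.gz', name='foo-stage') as stage:
    stage.fetch()            # download the archive (see fetch() below)
    stage.expand_archive()   # unpack it into stage.source_path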
コード例 #53
ファイル: stage.py プロジェクト: tvandera/spack
def get_checksums_for_versions(url_dict, name, **kwargs):
    """Fetches and checksums archives from URLs.

    This function is called by both ``spack checksum`` and ``spack
    create``.  The ``first_stage_function`` argument allows the caller to
    inspect the first downloaded archive, e.g., to determine the build
    system.

    Args:
        url_dict (dict): A dictionary of the form: version -> URL
        name (str): The name of the package
        first_stage_function (typing.Callable): function that takes a Stage and a URL;
            this is run on the stage of the first URL downloaded
        keep_stage (bool): whether to keep staging area when command completes
        batch (bool): whether to ask user how many versions to fetch (false)
            or fetch all versions (true)
        latest (bool): whether to take the latest version (true) or all (false)
        fetch_options (dict): Options used for the fetcher (such as timeout
            or cookies)

    Returns:
        (str): A multi-line string containing versions and corresponding hashes

    """
    batch = kwargs.get('batch', False)
    fetch_options = kwargs.get('fetch_options', None)
    first_stage_function = kwargs.get('first_stage_function', None)
    keep_stage = kwargs.get('keep_stage', False)
    latest = kwargs.get('latest', False)

    sorted_versions = sorted(url_dict.keys(), reverse=True)
    if latest:
        sorted_versions = sorted_versions[:1]

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v in sorted_versions)
    num_ver = len(sorted_versions)

    tty.msg(
        'Found {0} version{1} of {2}:'.format(num_ver,
                                              '' if num_ver == 1 else 's',
                                              name), '',
        *llnl.util.lang.elide_list([
            '{0:{1}}  {2}'.format(str(v), max_len, url_dict[v])
            for v in sorted_versions
        ]))
    print()

    if batch or latest:
        archives_to_fetch = len(sorted_versions)
    else:
        archives_to_fetch = tty.get_number(
            "How many would you like to checksum?", default=1, abort='q')

    if not archives_to_fetch:
        tty.die("Aborted.")

    versions = sorted_versions[:archives_to_fetch]
    urls = [url_dict[v] for v in versions]

    tty.debug('Downloading...')
    version_hashes = []
    i = 0
    errors = []
    for url, version in zip(urls, versions):
        # Wheels should not be expanded during staging
        expand_arg = ''
        if url.endswith('.whl') or '.whl#' in url:
            expand_arg = ', expand=False'
        try:
            if fetch_options:
                url_or_fs = fs.URLFetchStrategy(url,
                                                fetch_options=fetch_options)
            else:
                url_or_fs = url
            with Stage(url_or_fs, keep=keep_stage) as stage:
                # Fetch the archive
                stage.fetch()
                if i == 0 and first_stage_function:
                    # Only run first_stage_function the first time,
                    # no need to run it every time
                    first_stage_function(stage, url)

                # Checksum the archive and add it to the list
                version_hashes.append(
                    (version,
                     spack.util.crypto.checksum(hashlib.sha256,
                                                stage.archive_file)))
                i += 1
        except FailedDownloadError:
            errors.append('Failed to fetch {0}'.format(url))
        except Exception as e:
            tty.msg('Something failed on {0}, skipping.  ({1})'.format(url, e))

    for msg in errors:
        tty.debug(msg)

    if not version_hashes:
        tty.die("Could not fetch any versions for {0}".format(name))

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v, h in version_hashes)

    # Generate the version directives to put in a package.py
    version_lines = "\n".join([
        "    version('{0}', {1}sha256='{2}'{3})".format(
            v, ' ' * (max_len - len(str(v))), h, expand_arg)
        for v, h in version_hashes
    ])

    num_hash = len(version_hashes)
    tty.debug('Checksummed {0} version{1} of {2}:'.format(
        num_hash, '' if num_hash == 1 else 's', name))

    return version_lines
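
A hedged usage sketch; the version and URL are placeholders. With batch=True, every version is checksummed without prompting:

url_dict = {'1.2.11': 'https://example.com/zlib-1.2.11.tar.gz'}
version_lines = get_checksums_for_versions(url_dict, 'zlib', batch=True)

# version_lines holds ready-to-paste version() directives, e.g.:
#     version('1.2.11', sha256='...')
print(version_lines)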
コード例 #54
ファイル: stage.py プロジェクト: vmiheer/spack
    def fetch(self, mirror_only=False, err_msg=None):
        """Retrieves the code or archive

        Args:
            mirror_only (bool): only fetch from a mirror
            err_msg (str or None): the error message to display if all fetchers
                fail or ``None`` for the default fetch failure message
        """
        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_paths:
            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() strips everything past the final '/' in the
            # root, we add a '/' if it is not present.
            mirror_urls = {}
            for mirror in spack.mirror.MirrorCollection().values():
                for rel_path in self.mirror_paths:
                    mirror_url = url_util.join(mirror.fetch_url, rel_path)
                    mirror_urls[mirror_url] = {}
                    if mirror.get_access_pair("fetch") or \
                       mirror.get_access_token("fetch") or \
                       mirror.get_profile("fetch"):
                        mirror_urls[mirror_url] = {
                            "access_token": mirror.get_access_token("fetch"),
                            "access_pair": mirror.get_access_pair("fetch"),
                            "access_profile": mirror.get_profile("fetch"),
                            "endpoint_url": mirror.get_endpoint_url("fetch")
                        }

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            # Insert fetchers in the order that the URLs are provided.
            for url in reversed(list(mirror_urls.keys())):
                fetchers.insert(
                    0,
                    fs.from_url_scheme(url,
                                       digest,
                                       expand=expand,
                                       extension=extension,
                                       connection=mirror_urls[url]))

            if self.default_fetcher.cachable:
                for rel_path in reversed(list(self.mirror_paths)):
                    cache_fetcher = spack.caches.fetch_cache.fetcher(
                        rel_path, digest, expand=expand, extension=extension)
                    fetchers.insert(0, cache_fetcher)

        def generate_fetchers():
            for fetcher in fetchers:
                yield fetcher
            # The search function may be expensive, so wait until now to
            # call it so the user can stop if a prior fetcher succeeded
            if self.search_fn and not mirror_only:
                dynamic_fetchers = self.search_fn()
                for fetcher in dynamic_fetchers:
                    yield fetcher

        def print_errors(errors):
            for msg in errors:
                tty.debug(msg)

        errors = []
        for fetcher in generate_fetchers():
            try:
                fetcher.stage = self
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.fetch_strategy.NoCacheError:
                # Don't bother reporting when something is not cached.
                continue
            except spack.error.SpackError as e:
                errors.append('Fetching from {0} failed.'.format(fetcher))
                tty.debug(e)
                continue
        else:
            print_errors(errors)

            self.fetcher = self.default_fetcher
            default_msg = 'All fetchers failed for {0}'.format(self.name)
            raise fs.FetchError(err_msg or default_msg, None)

        print_errors(errors)
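The loop above relies on Python's for/else: the else branch runs only when the loop finishes without a break, i.e. when every fetcher failed. A minimal, self-contained sketch of that pattern with toy fetchers (not Spack's real fetch strategies):

class ToyFetchError(Exception):
    pass

def try_fetchers(fetchers):
    errors = []
    for fetcher in fetchers:
        try:
            fetcher()       # a real fetcher would download something here
            break           # success: the else clause below is skipped
        except ToyFetchError as e:
            errors.append(str(e))
            continue
    else:
        # Reached only when no 'break' happened, i.e. all fetchers failed.
        raise RuntimeError('all fetchers failed: %s' % '; '.join(errors))

def bad():
    raise ToyFetchError('mirror unreachable')

def good():
    print('fetched!')

try_fetchers([bad, good])   # prints 'fetched!'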
Code Example #55
File: stage.py Project: tvandera/spack
    def expand_archive(self):
        tty.debug('Using source directory: {0}'.format(self.source_path))
Code Example #56
File: compiler.py Project: noslin005/spack-contrib
def _parse_link_paths(string):
    """Parse implicit link paths from compiler debug output.

    This gives the compiler runtime library paths that we need to add to
    the RPATH of generated binaries and libraries.  It allows us to
    ensure, e.g., that codes load the right libstdc++ for their compiler.
    """
    lib_search_paths = False
    raw_link_dirs = []
    tty.debug('parsing implicit link info')
    for line in string.splitlines():
        if lib_search_paths:
            if line.startswith('\t'):
                raw_link_dirs.append(line[1:])
                continue
            else:
                lib_search_paths = False
        elif line.startswith('Library search paths:'):
            lib_search_paths = True

        if not _LINKER_LINE.match(line):
            continue
        if _LINKER_LINE_IGNORE.match(line):
            continue
        tty.debug('linker line: %s' % line)

        next_arg = False
        for arg in line.split():
            if arg in ('-L', '-Y'):
                next_arg = True
                continue

            if next_arg:
                raw_link_dirs.append(arg)
                next_arg = False
                continue

            link_dir_arg = _LINK_DIR_ARG.match(arg)
            if link_dir_arg:
                link_dir = link_dir_arg.group('dir')
                tty.debug('linkdir: %s' % link_dir)
                raw_link_dirs.append(link_dir)

            link_dir_arg = _LIBPATH_ARG.match(arg)
            if link_dir_arg:
                link_dir = link_dir_arg.group('dir')
                tty.debug('libpath: %s' % link_dir)
                raw_link_dirs.append(link_dir)
    tty.debug('found raw link dirs: %s' % ', '.join(raw_link_dirs))

    implicit_link_dirs = list()
    visited = set()
    for link_dir in raw_link_dirs:
        normalized_path = os.path.abspath(link_dir)
        if normalized_path not in visited:
            implicit_link_dirs.append(normalized_path)
            visited.add(normalized_path)

    tty.debug('found link dirs: %s' % ', '.join(implicit_link_dirs))
    return implicit_link_dirs
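The tail of _parse_link_paths normalizes each directory and de-duplicates while preserving first-seen order, so the compiler's search order survives. A standalone sketch of that idiom (plain Python, no Spack imports):

import os

def dedupe_link_dirs(raw_link_dirs):
    # Normalize each path, then keep only the first occurrence so the
    # original search order is preserved.
    visited = set()
    result = []
    for link_dir in raw_link_dirs:
        normalized = os.path.abspath(link_dir)
        if normalized not in visited:
            visited.add(normalized)
            result.append(normalized)
    return result

print(dedupe_link_dirs(['/usr/lib', '/usr/lib/', '/opt/lib']))
# -> ['/usr/lib', '/opt/lib']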
Code Example #57
File: ci.py Project: xsdk-project/spack-xsdk
def ci_rebuild(args):
    """This command represents a gitlab-ci job, corresponding to a single
       release spec.  As such it must first decide whether or not the spec it
       has been assigned to build is up to date on the remote binary mirror.
       If it is not (i.e. the full_hash of the spec as computed locally does
       not match the one stored in the metadata on the mirror), this script
       will build the package, create a binary cache for it, and then push all
       related files to the remote binary mirror.  This script also
       communicates with a remote CDash instance to share status on the package
       build process.

       The spec to be built by this job is represented by essentially two
       pieces of information: 1) a root spec (possibly already concrete, but
       maybe still needing to be concretized) and 2) a package name used to
       index that root spec (once the root is, for certain, concrete)."""
    env = ev.get_env(args, 'ci rebuild', required=True)
    yaml_root = ev.config_dict(env.yaml)

    # The following environment variables should be defined in the CI
    # infrastructure (or some other external source) in the case that the
    # remote mirror is an S3 bucket.  The AWS keys are used to upload
    # buildcache entries to S3 using the boto3 api.
    #
    # AWS_ACCESS_KEY_ID
    # AWS_SECRET_ACCESS_KEY
    # S3_ENDPOINT_URL (only needed for non-AWS S3 implementations)
    #
    # If present, we will import the SPACK_SIGNING_KEY using the
    # "spack gpg trust" command, so it can be used both for verifying
    # dependency buildcache entries and signing the buildcache entry we create
    # for our target pkg.
    #
    # SPACK_SIGNING_KEY

    ci_artifact_dir = get_env_var('CI_PROJECT_DIR')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')

    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
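        # URL-encode just the project name: urlencode() yields
        # 'project=<encoded name>'; strip everything up to and including
        # the '='.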
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    remote_mirror_url = None
    if 'mirrors' in yaml_root:
        ci_mirrors = yaml_root['mirrors']
        mirror_urls = list(ci_mirrors.values())
        remote_mirror_url = mirror_urls[0]

    if not remote_mirror_url:
        tty.die('spack ci rebuild requires an env containing a mirror')

    tty.debug('ci_artifact_dir = {0}'.format(ci_artifact_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    spack_cmd = exe.which('spack')

    cdash_report_dir = os.path.join(ci_artifact_dir, 'cdash_report')
    temp_dir = os.path.join(ci_artifact_dir, 'jobs_scratch_dir')
    job_log_dir = os.path.join(temp_dir, 'logs')
    spec_dir = os.path.join(temp_dir, 'specs')

    local_mirror_dir = os.path.join(ci_artifact_dir, 'local_mirror')
    build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')

    spack_is_pr_pipeline = (pr_env_var == 'True')

    enable_artifacts_mirror = False
    artifact_mirror_url = None
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if enable_artifacts_mirror or spack_is_pr_pipeline:
            # If this is a PR pipeline, we will override the setting to
            # make sure that the artifacts buildcache is enabled.  Otherwise
            # jobs will not have binary deps available, since we do not
            # allow pushing binaries to the remote mirror during PR
            # pipelines.
            enable_artifacts_mirror = True
            artifact_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                artifact_mirror_url)
            tty.debug(mirror_msg)

    # Clean out scratch directory from last stage
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)

    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    os.makedirs(job_log_dir)
    os.makedirs(spec_dir)

    job_spec_yaml_path = os.path.join(spec_dir,
                                      '{0}.yaml'.format(job_spec_pkg_name))
    job_log_file = os.path.join(job_log_dir, 'pipeline_log.txt')

    cdash_build_id = None
    cdash_build_stamp = None

    with open(job_log_file, 'w') as log_fd:
        os.dup2(log_fd.fileno(), sys.stdout.fileno())
        os.dup2(log_fd.fileno(), sys.stderr.fileno())
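        # Redirect this process's stdout/stderr into the pipeline log so
        # that everything below is captured in the job's log artifact.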

        current_directory = os.getcwd()
        tty.debug('Current working directory: {0}, Contents:'.format(
            current_directory))
        directory_list = os.listdir(current_directory)
        for next_entry in directory_list:
            tty.debug('  {0}'.format(next_entry))

        # Make a copy of the environment file, so we can overwrite the changed
        # version in between the two invocations of "spack install"
        env_src_path = env.manifest_path
        env_dirname = os.path.dirname(env_src_path)
        env_filename = os.path.basename(env_src_path)
        env_copyname = '{0}_BACKUP'.format(env_filename)
        env_dst_path = os.path.join(env_dirname, env_copyname)
        shutil.copyfile(env_src_path, env_dst_path)

        tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))

        if signing_key:
            spack_ci.import_signing_key(signing_key)

        spack_ci.configure_compilers(compiler_action)

        spec_map = spack_ci.get_concrete_specs(root_spec, job_spec_pkg_name,
                                               related_builds, compiler_action)

        job_spec = spec_map[job_spec_pkg_name]

        tty.debug('Here is the concrete spec: {0}'.format(job_spec))

        with open(job_spec_yaml_path, 'w') as fd:
            fd.write(job_spec.to_yaml(hash=ht.build_hash))

        tty.debug('Done writing concrete spec')

        # DEBUG
        with open(job_spec_yaml_path) as fd:
            tty.debug('Wrote spec file, read it back.  Contents:')
            tty.debug(fd.read())

        # DEBUG the root spec
        root_spec_yaml_path = os.path.join(spec_dir, 'root.yaml')
        with open(root_spec_yaml_path, 'w') as fd:
            fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

        if bindist.needs_rebuild(job_spec, remote_mirror_url, True):
            # The binary on the remote mirror is not up to date, so we
            # need to rebuild it.
            #
            # FIXME: ensure mirror precedence causes this local mirror to
            # be chosen ahead of the remote one when installing deps
            if enable_artifacts_mirror:
                mirror_add_output = spack_cmd('mirror', 'add', 'local_mirror',
                                              artifact_mirror_url)
                tty.debug('spack mirror add:')
                tty.debug(mirror_add_output)

            mirror_list_output = spack_cmd('mirror', 'list')
            tty.debug('listing spack mirrors:')
            tty.debug(mirror_list_output)

            # 2) build up install arguments
            install_args = ['-d', '-v', '-k', 'install', '--keep-stage']

            # 3) create/register a new build on CDash (if enabled)
            cdash_args = []
            if enable_cdash:
                tty.debug('Registering build with CDash')
                (cdash_build_id,
                 cdash_build_stamp) = spack_ci.register_cdash_build(
                     cdash_build_name, cdash_base_url, cdash_project,
                     cdash_site, job_spec_buildgroup)

                cdash_upload_url = '{0}/submit.php?project={1}'.format(
                    cdash_base_url, cdash_project_enc)

                cdash_args = [
                    '--cdash-upload-url',
                    cdash_upload_url,
                    '--cdash-build',
                    cdash_build_name,
                    '--cdash-site',
                    cdash_site,
                    '--cdash-buildstamp',
                    cdash_build_stamp,
                ]

            spec_cli_arg = [job_spec_yaml_path]

            tty.debug('Installing package')

            try:
                # Two-pass install is intended to avoid spack trying to
                # install from buildcache even though the locally computed
                # full hash is different from the one stored in the
                # spec.yaml file on the remote mirror.
                first_pass_args = install_args + [
                    '--cache-only',
                    '--only',
                    'dependencies',
                ]
                first_pass_args.extend(spec_cli_arg)
                tty.debug('First pass install arguments: {0}'.format(
                    first_pass_args))
                spack_cmd(*first_pass_args)

                # Overwrite the changed environment file so it doesn't break
                # the next install invocation.
                tty.debug('Copying {0} to {1}'.format(env_dst_path,
                                                      env_src_path))
                shutil.copyfile(env_dst_path, env_src_path)

                second_pass_args = install_args + [
                    '--no-cache',
                    '--only',
                    'package',
                ]
                second_pass_args.extend(cdash_args)
                second_pass_args.extend(spec_cli_arg)
                tty.debug('Second pass install arguments: {0}'.format(
                    second_pass_args))
                spack_cmd(*second_pass_args)
            except Exception as inst:
                tty.error('Caught exception during install:')
                tty.error(inst)

            spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

            # 4) create buildcache on remote mirror, but not if this is
            # running to test a spack PR
            if not spack_is_pr_pipeline:
                spack_ci.push_mirror_contents(env, job_spec,
                                              job_spec_yaml_path,
                                              remote_mirror_url,
                                              cdash_build_id)

            # 5) create another copy of that buildcache on "local artifact
            # mirror" (only done if CDash reporting is enabled)
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          artifact_mirror_url, cdash_build_id)

            # 6) relate this build to its dependencies on CDash (if enabled)
            if enable_cdash:
                spack_ci.relate_cdash_builds(
                    spec_map, cdash_base_url, cdash_build_id, cdash_project,
                    artifact_mirror_url or remote_mirror_url)
        else:
            # There is nothing to do here unless "local artifact mirror" is
            # enabled, in which case, we need to download the buildcache to
            # the local artifacts directory to be used by dependent jobs in
            # subsequent stages
            tty.debug('No need to rebuild {0}'.format(job_spec_pkg_name))
            if enable_artifacts_mirror:
                tty.debug('Getting {0} buildcache'.format(job_spec_pkg_name))
                tty.debug('Downloading to {0}'.format(build_cache_dir))
                buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                     True, remote_mirror_url)
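ci_rebuild repeatedly calls a get_env_var helper that is not part of this excerpt. A minimal stand-in, assuming it simply reads the process environment (an assumption for illustration, not Spack's actual implementation):

import os

def get_env_var(name):
    # Hypothetical stand-in: return the variable's value, or '' when it
    # is unset, so truthiness checks like 'if signing_key:' stay simple.
    return os.environ.get(name, '')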
Code Example #58
def debug_info(line_header, match_list):
    if match_list:
        msg = '\t{0} : {1}'.format(line_header, spec.cshort_spec)
        tty.debug(msg)
        for rule in match_list:
            tty.debug('\t\tmatches rule: {0}'.format(rule))
Code Example #59
def relocate_package(spec, allow_root):
    """
    Relocate the given package
    """
    workdir = str(spec.prefix)
    buildinfo = read_buildinfo_file(workdir)
    new_layout_root = str(spack.store.layout.root)
    new_prefix = str(spec.prefix)
    new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
    new_spack_prefix = str(spack.paths.prefix)
    old_layout_root = str(buildinfo['buildpath'])
    old_spack_prefix = str(buildinfo.get('spackprefix'))
    old_rel_prefix = buildinfo.get('relative_prefix')
    old_prefix = os.path.join(old_layout_root, old_rel_prefix)
    rel = buildinfo.get('relative_rpaths')
    prefix_to_hash = buildinfo.get('prefix_to_hash', None)
    if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
        msg = "Package tarball was created from an install "
        msg += "prefix with a different directory layout and an older "
        msg += "buildcache create implementation. It cannot be relocated."
        raise NewLayoutException(msg)
    # Older buildcaches do not have the prefix_to_hash dictionary; set an
    # empty dictionary here and add one entry to prefix_to_prefix below to
    # reproduce the old behavior.
    if not prefix_to_hash:
        prefix_to_hash = dict()
    hash_to_prefix = dict()
    hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
    new_deps = spack.build_environment.get_rpath_deps(spec.package)
    for d in new_deps:
        hash_to_prefix[d.format('{hash}')] = str(d.prefix)
    prefix_to_prefix = dict()
    for orig_prefix, hash in prefix_to_hash.items():
        prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
    prefix_to_prefix[old_prefix] = new_prefix
    prefix_to_prefix[old_layout_root] = new_layout_root

    tty.debug("Relocating package from",
              "%s to %s." % (old_layout_root, new_layout_root))

    def is_backup_file(file):
        return file.endswith('~')

    # Text files containing the prefix text
    text_names = list()
    for filename in buildinfo['relocate_textfiles']:
        text_name = os.path.join(workdir, filename)
        # Don't add backup files generated by filter_file during install step.
        if not is_backup_file(text_name):
            text_names.append(text_name)

    # If we are not installing back to the same install tree, do the
    # relocation.
    if old_layout_root != new_layout_root:
        files_to_relocate = [os.path.join(workdir, filename)
                             for filename in buildinfo.get('relocate_binaries')
                             ]
        # If the buildcache was not created with relativized rpaths
        # do the relocation of path in binaries
        if (spec.architecture.platform == 'darwin' or
                (spec.architecture.platform == 'test' and
                 platform.system().lower() == 'darwin')):
            relocate.relocate_macho_binaries(files_to_relocate,
                                             old_layout_root,
                                             new_layout_root,
                                             prefix_to_prefix, rel,
                                             old_prefix,
                                             new_prefix)
        if (spec.architecture.platform == 'linux' or
                (spec.architecture.platform == 'test' and
                 platform.system().lower() == 'linux')):
            relocate.relocate_elf_binaries(files_to_relocate,
                                           old_layout_root,
                                           new_layout_root,
                                           prefix_to_prefix, rel,
                                           old_prefix,
                                           new_prefix)
            # Relocate links to the new install prefix
            links = list(buildinfo.get('relocate_links', []))
            relocate.relocate_links(
                links, old_layout_root, old_prefix, new_prefix
            )

        # For all buildcaches
        # relocate the install prefixes in text files including dependencies
        relocate.relocate_text(text_names,
                               old_layout_root, new_layout_root,
                               old_prefix, new_prefix,
                               old_spack_prefix,
                               new_spack_prefix,
                               prefix_to_prefix)

        paths_to_relocate = [old_prefix, old_layout_root]
        paths_to_relocate.extend(prefix_to_hash.keys())
        files_to_relocate = list(filter(
            lambda pathname: not relocate.file_is_relocatable(
                pathname, paths_to_relocate=paths_to_relocate),
            map(lambda filename: os.path.join(workdir, filename),
                buildinfo['relocate_binaries'])))
        # relocate the install prefixes in binary files including dependencies
        relocate.relocate_text_bin(files_to_relocate,
                                   old_prefix, new_prefix,
                                   old_spack_prefix,
                                   new_spack_prefix,
                                   prefix_to_prefix)

    # If we are installing back to the same location, relocate the sbang
    # location if the spack directory changed.
    else:
        if old_spack_prefix != new_spack_prefix:
            relocate.relocate_text(text_names,
                                   old_layout_root, new_layout_root,
                                   old_prefix, new_prefix,
                                   old_spack_prefix,
                                   new_spack_prefix,
                                   prefix_to_prefix)
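The prefix_to_prefix mapping above composes two lookups: old prefix -> hash (recorded in the buildinfo at buildcache-creation time) and hash -> new prefix (computed on the installing machine). A toy illustration with made-up paths and hashes:

prefix_to_hash = {'/old/tree/zlib-1.2.11-abc123': 'abc123'}
hash_to_prefix = {'abc123': '/new/tree/zlib-1.2.11-abc123'}

prefix_to_prefix = {
    old: hash_to_prefix.get(h) for old, h in prefix_to_hash.items()
}
print(prefix_to_prefix)
# {'/old/tree/zlib-1.2.11-abc123': '/new/tree/zlib-1.2.11-abc123'}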
Code Example #60
File: stage.py Project: tvandera/spack
def print_errors(errors):
    for msg in errors:
        tty.debug(msg)