Example #1
    def merge(self, dest_root, **kwargs):
        """Link all files in src into dest, creating directories
           if necessary.
           If ignore_conflicts is True, do not break when the target exists but
           rather return a list of files that could not be linked.
           Note that files blocking directories will still cause an error.
        """
        ignore_conflicts = kwargs.get("ignore_conflicts", False)

        ignore = kwargs.get('ignore', lambda x: False)
        conflict = self.find_conflict(
            dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts)
        if conflict:
            raise MergeConflictError(conflict)

        self.merge_directories(dest_root, ignore)
        existing = []
        merge_file = kwargs.get('merge_file', merge_link)
        for src, dst in self.get_file_map(dest_root, ignore).items():
            if os.path.exists(dst):
                existing.append(dst)
            else:
                merge_file(src, dst)

        for c in existing:
            tty.warn("Could not merge: %s" % c)
Example #2
def link_one(spec, path, link=os.symlink, verbose=False):
    'Link all files in `spec` into directory `path`.'

    dotspack = transform_path(spec, '.spack', path)
    if os.path.exists(dotspack):
        tty.warn('Skipping existing package: "%s"' % spec.name)
        return

    if verbose:
        tty.info('Linking package: "%s"' % spec.name)
    for dirpath, dirnames, filenames in os.walk(spec.prefix):
        if not filenames:
            continue        # avoid explicitly making empty dirs

        targdir = transform_path(spec, dirpath, path)
        assuredir(targdir)

        for fname in filenames:
            src = os.path.join(dirpath, fname)
            dst = os.path.join(targdir, fname)
            if os.path.exists(dst):
                if '.spack' in dst.split(os.path.sep):
                    continue    # silence these
                tty.warn("Skipping existing file: %s" % dst)
                continue
            link(src, dst)
Example #3
 def process_environment_command(self, env):
     for command in env:
         try:
             yield self.formats[type(command)].format(**command.args)
         except KeyError:
             tty.warn('Cannot handle command of type {command} : skipping request'.format(command=type(command)))
             tty.warn('{context} at {filename}:{lineno}'.format(**command.args))
Example #4
    def __init__(self, *repo_dirs, **kwargs):
        # super-namespace for all packages in the RepoPath
        self.super_namespace = kwargs.get('namespace', repo_namespace)

        self.repos = []
        self.by_namespace = NamespaceTrie()
        self.by_path = {}

        self._all_package_names = None
        self._provider_index = None

        # If repo_dirs is empty, just use the configuration
        if not repo_dirs:
            import spack.config
            repo_dirs = spack.config.get_config('repos')
            if not repo_dirs:
                raise NoRepoConfiguredError(
                    "Spack configuration contains no package repositories.")

        # Add each repo to this path.
        for root in repo_dirs:
            try:
                repo = Repo(root, self.super_namespace)
                self.put_last(repo)
            except RepoError as e:
                tty.warn("Failed to initialize repository at '%s'." % root,
                         e.message,
                         "To remove the bad repository, run this command:",
                         "    spack repo rm %s" % root)
Example #5
    def dump_provenance(self, spec, path):
        """Dump provenance information for a spec to a particular path.

           This dumps the package file and any associated patch files.
           Raises UnknownPackageError if not found.
        """
        # Some preliminary checks.
        if spec.virtual:
            raise UnknownPackageError(spec.name)

        if spec.namespace and spec.namespace != self.namespace:
            raise UnknownPackageError(
                "Repository %s does not contain package %s."
                % (self.namespace, spec.fullname))

        # Install any patch files needed by packages.
        mkdirp(path)
        for spec, patches in spec.package.patches.items():
            for patch in patches:
                if patch.path:
                    if os.path.exists(patch.path):
                        install(patch.path, path)
                    else:
                        tty.warn("Patch file did not exist: %s" % patch.path)

        # Install the package.py file itself.
        install(self.filename_for_package_name(spec), path)
Example #6
def filter_shebang(path):
    """Adds a second shebang line, using sbang, at the beginning of a file."""
    with open(path, 'r') as original_file:
        original = original_file.read()

    # This line will be prepended to the file
    new_sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root

    # Skip files that are already using sbang.
    if original.startswith(new_sbang_line):
        return

    # Use --! instead of #! on second line for lua.
    if re.search(r'^#!(/[^/]*)*lua\b', original):
        original = re.sub(r'^#', '--', original)

    # Change non-writable files to be writable if needed.
    saved_mode = None
    if not os.access(path, os.W_OK):
        st = os.stat(path)
        saved_mode = st.st_mode
        os.chmod(path, saved_mode | stat.S_IWRITE)

    with open(path, 'w') as new_file:
        new_file.write(new_sbang_line)
        new_file.write(original)

    # Restore original permissions.
    if saved_mode is not None:
        os.chmod(path, saved_mode)

    tty.warn("Patched overlong shebang in %s" % path)
Example #7
    def installed_relatives(self, spec, direction='children', transitive=True):
        """Return installed specs related to this one."""
        if direction not in ('parents', 'children'):
            raise ValueError("Invalid direction: %s" % direction)

        relatives = set()
        for spec in self.query(spec):
            if transitive:
                to_add = spec.traverse(direction=direction, root=False)
            elif direction == 'parents':
                to_add = spec.dependents()
            else:  # direction == 'children'
                to_add = spec.dependencies()

            for relative in to_add:
                hash_key = relative.dag_hash()
                if hash_key not in self._data:
                    reltype = ('Dependent' if direction == 'parents'
                               else 'Dependency')
                    tty.warn("Inconsistent state! %s %s of %s not in DB"
                             % (reltype, hash_key, spec.dag_hash()))
                    continue

                if not self._data[hash_key].installed:
                    continue

                relatives.add(relative)
        return relatives
Example #8
    def print_status(self, *specs, **kwargs):
        if kwargs.get("with_dependencies", False):
            specs = set(get_dependencies(specs))

        specs = sorted(specs, key=lambda s: s.name)
        in_view = list(map(self.get_spec, specs))

        for s, v in zip(specs, in_view):
            if not v:
                tty.error(self._croot +
                          'Package not linked: %s' % s.name)
            elif s != v:
                self.print_conflict(v, s, level="warn")

        in_view = list(filter(None, in_view))

        if len(specs) > 0:
            tty.msg("Packages linked in %s:" % self._croot[:-1])

            # avoid circular dependency
            import spack.cmd
            spack.cmd.display_specs(in_view, flags=True, variants=True,
                                    long=self.verbose)
        else:
            tty.warn(self._croot + "No packages found.")
Example #9
def get_matching_versions(specs, **kwargs):
    """Get a spec for EACH known version matching any spec in the list."""
    matching = []
    for spec in specs:
        pkg = spec.package

        # Skip any package that has no known versions.
        if not pkg.versions:
            tty.msg("No safe (checksummed) versions for package %s" % pkg.name)
            continue

        num_versions = kwargs.get('num_versions', 0)
        matching_spec = []
        for i, v in enumerate(reversed(sorted(pkg.versions))):
            # Generate no more than num_versions versions for each spec.
            if num_versions and i >= num_versions:
                break

            # Generate only versions that satisfy the spec.
            if v.satisfies(spec.versions):
                s = Spec(pkg.name)
                s.versions = VersionList([v])
                s.variants = spec.variants.copy()
                # This is needed to avoid hanging references during the
                # concretization phase
                s.variants.spec = s
                matching_spec.append(s)

        if not matching_spec:
            tty.warn("No known version matches spec: %s" % spec)
        matching.extend(matching_spec)

    return matching
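The capping loop above, reduced to a standalone sketch with plain strings
standing in for Spack's Version objects:

    versions = ['1.0', '1.2', '2.0', '2.1']
    num_versions = 2
    picked = []
    for i, v in enumerate(reversed(sorted(versions))):
        if num_versions and i >= num_versions:
            break
        picked.append(v)
    # picked == ['2.1', '2.0']: newest first, capped at num_versions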
Example #10
    def fetch_from_url(self, url):
        # Run curl but grab the mime type from the http headers
        headers = spack.curl('-#',        # status bar
                             '-O',        # save file to disk
                             '-D', '-',   # print out HTTP headers
                             '-L', url,
                             return_output=True, fail_on_error=False)

        if spack.curl.returncode != 0:
            # clean up archive on failure.
            if self.archive_file:
                os.remove(self.archive_file)

            if spack.curl.returncode == 60:
                # This is a certificate error.  Suggest spack -k
                raise FailedDownloadError(
                    url,
                    "Curl was unable to fetch due to invalid certificate. "
                    "This is either an attack, or your cluster's SSL configuration "
                    "is bad.  If you believe your SSL configuration is bad, you "
                    "can try running spack -k, which will not check SSL certificates."
                    "Use this at your own risk.")

        # Check if we somehow got an HTML file rather than the archive we
        # asked for.  We only look at the last content type, to handle
        # redirects properly.
        content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
        if content_types and 'text/html' in content_types[-1]:
            tty.warn("The contents of " + self.archive_file + " look like HTML.",
                     "The checksum will likely be bad.  If it is, you can use",
                     "'spack clean --dist' to remove the bad archive, then fix",
                     "your internet gateway issue and install again.")
Example #11
def get_build_system(args, guesser):
    """Determine the build system template.

    If a template is specified, always use that. Otherwise, if a URL
    is provided, download the tarball and peek inside to guess what
    build system it uses. Otherwise, use a generic template by default.

    Args:
        args (argparse.Namespace): The arguments given to ``spack create``
        guesser (BuildSystemGuesser): The first_stage_function given to
            ``spack checksum`` which records the build system it detects

    Returns:
        str: The name of the build system template to use
    """

    # Default template
    template = 'generic'

    if args.template:
        # Use a user-supplied template if one is present
        template = args.template
        tty.msg("Using specified package template: '{0}'".format(template))
    elif args.url:
        # Use whatever build system the guesser detected
        template = guesser.build_system
        if template == 'generic':
            tty.warn("Unable to detect a build system. "
                     "Using a generic package template.")
        else:
            msg = "This package looks like it uses the {0} build system"
            tty.msg(msg.format(template))

    return template
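A hypothetical sketch of the kind of check a BuildSystemGuesser performs
(the real guesser inspects the downloaded tarball; these file tests are
illustrative assumptions only):

    def guess_build_system(filenames):
        if any(f.endswith('CMakeLists.txt') for f in filenames):
            return 'cmake'
        if any(f.endswith('configure') for f in filenames):
            return 'autotools'
        if any(f.endswith('setup.py') for f in filenames):
            return 'python'
        return 'generic'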
Example #12
def get_config(section, scope=None):
    """Get configuration settings for a section.

       Strips off the top-level section name from the YAML dict.
    """
    validate_section_name(section)
    merged_section = syaml.syaml_dict()

    if scope is None:
        scopes = config_scopes.values()
    else:
        scopes = [validate_scope(scope)]

    for scope in scopes:
        # read potentially cached data from the scope.
        data = scope.get_section(section)

        # Skip empty configs
        if not data or not isinstance(data, dict):
            continue

        # Allow complete override of site config with '<section>::'
        override_key = section + ':'
        if not (section in data or override_key in data):
            tty.warn("Skipping bad configuration file: '%s'" % scope.path)
            continue

        if override_key in data:
            merged_section = data[override_key]
        else:
            merged_section = _merge_yaml(merged_section, data[section])

    return merged_section
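The '<section>::' override semantics can be sketched with plain dicts. Note
that a YAML key written as 'config::' arrives from the parser as 'config:',
which is exactly what the override_key check above matches (_merge_yaml is
replaced by dict.update here for brevity):

    merged = {}
    for data in ({'config': {'a': 1}}, {'config:': {'b': 2}}):
        if 'config:' in data:
            merged = data['config:']      # full override of earlier scopes
        else:
            merged.update(data['config'])
    # merged == {'b': 2}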
Example #13
def setup_main_options(args):
    """Configure spack globals based on the basic options."""
    # Set up environment based on args.
    tty.set_verbose(args.verbose)
    tty.set_debug(args.debug)
    tty.set_stacktrace(args.stacktrace)

    # debug must be set first so that it can even affect behavior of
    # errors raised by spack.config.
    if args.debug:
        spack.error.debug = True
        spack.util.debug.register_interrupt_handler()
        spack.config.set('config:debug', True, scope='command_line')

    # override lock configuration if passed on command line
    if args.locks is not None:
        spack.util.lock.check_lock_safety(spack.paths.prefix)
        spack.config.set('config:locks', False, scope='command_line')

    if args.mock:
        rp = spack.repo.RepoPath(spack.paths.mock_packages_path)
        spack.repo.set_path(rp)

    # If the user asked for it, don't check ssl certs.
    if args.insecure:
        tty.warn("You asked for --insecure. Will NOT check SSL certificates.")
        spack.config.set('config:verify_ssl', False, scope='command_line')

    # when to use color (takes always, auto, or never)
    tty.color.set_color_when(args.color)
Example #14
def view(parser, args):
    'Produce a view of a set of packages.'

    specs = spack.cmd.parse_specs(args.specs)
    path = args.path[0]

    view = YamlFilesystemView(
        path, spack.store.layout,
        ignore_conflicts=getattr(args, "ignore_conflicts", False),
        link=os.link if args.action in ["hardlink", "hard"]
        else os.symlink,
        verbose=args.verbose)

    # Process common args and specs
    if getattr(args, "all", False):
        specs = view.get_all_specs()
        if len(specs) == 0:
            tty.warn("Found no specs in %s" % path)

    elif args.action in actions_link:
        # only link commands need to disambiguate specs
        specs = [spack.cmd.disambiguate_spec(s) for s in specs]

    elif args.action in actions_status:
        # no specs implies all
        if len(specs) == 0:
            specs = view.get_all_specs()
        else:
            specs = relaxed_disambiguate(specs, view)

    else:
        # status and remove can map the name to packages in view
        specs = relaxed_disambiguate(specs, view)

    with_dependencies = args.dependencies.lower() in ['true', 'yes']

    # Map action to corresponding functionality
    if args.action in actions_link:
        try:
            view.add_specs(*specs,
                           with_dependencies=with_dependencies,
                           exclude=args.exclude)
        except MergeConflictError:
            tty.info("Some file blocked the merge, adding the '-i' flag will "
                     "ignore this conflict. For more information see e.g. "
                     "https://github.com/spack/spack/issues/9029")
            raise

    elif args.action in actions_remove:
        view.remove_specs(*specs,
                          with_dependencies=with_dependencies,
                          exclude=args.exclude,
                          with_dependents=not args.no_remove_dependents)

    elif args.action in actions_status:
        view.print_status(*specs, with_dependencies=with_dependencies)

    else:
        tty.error('Unknown action: "%s"' % args.action)
Example #15
def ask_for_confirmation(message):
    while True:
        tty.msg(message + '[y/n]')
        choice = input().lower()
        if choice == 'y':
            break
        elif choice == 'n':
            raise SystemExit('Operation aborted')
        tty.warn('Please reply either "y" or "n"')
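Usage sketch (hypothetical call site):

    ask_for_confirmation('Uninstall all packages? ')
    # reaching this line means the user answered 'y'; answering 'n'
    # raises SystemExit inside the helper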
Example #16
 def clean(self):
     """By default just runs make clean.  Override if this isn't good."""
     try:
         # TODO: should we really call make clean, or just blow away the directory?
         make = build_env.MakeExecutable('make', self.parallel)
         make('clean')
         tty.msg("Successfully cleaned %s" % self.name)
     except subprocess.CalledProcessError as e:
         tty.warn("Warning: 'make clean' didn't work.  Consider 'spack clean --work'.")
Example #17
    def __call__(self, *args, **kwargs):
        """Run the executable with subprocess.check_output, return output."""
        return_output = kwargs.get("return_output", False)
        fail_on_error = kwargs.get("fail_on_error", True)
        ignore_errors = kwargs.get("ignore_errors", ())

        output        = kwargs.get("output", sys.stdout)
        error         = kwargs.get("error", sys.stderr)
        input         = kwargs.get("input", None)

        def streamify(arg, mode):
            if isinstance(arg, str):
                return open(arg, mode), True
            elif arg is None and mode != 'r':
                return open(os.devnull, mode), True
            return arg, False
        output, ostream = streamify(output, 'w')
        error,  estream = streamify(error,  'w')
        input,  istream = streamify(input,  'r')

        # if they just want to ignore one error code, make it a tuple.
        if isinstance(ignore_errors, int):
            ignore_errors = (ignore_errors,)

        quoted_args = [arg for arg in args if re.search(r'^"|^\'|"$|\'$', arg)]
        if quoted_args:
            tty.warn("Quotes in command arguments can confuse scripts like configure.",
                     "The following arguments may cause problems when executed:",
                     str("\n".join(["    "+arg for arg in quoted_args])),
                     "Quotes aren't needed because spack doesn't use a shell.",
                     "Consider removing them")

        cmd = self.exe + list(args)

        cmd_line = ' '.join(cmd)
        tty.debug(cmd_line)

        try:
            proc = subprocess.Popen(
                cmd,
                stdin=input,
                stderr=error,
                stdout=subprocess.PIPE if return_output else output)
            out, err = proc.communicate()
            self.returncode = proc.returncode

            rc = proc.returncode
            if fail_on_error and rc != 0 and (rc not in ignore_errors):
                raise ProcessError("Command exited with status %d:"
                                   % proc.returncode, cmd_line)
            if return_output:
                return out

        except OSError as e:
            raise ProcessError(
                "%s: %s" % (self.exe[0], e.strerror),
                "Command: " + cmd_line)
Example #18
 def process_environment_command(self, env):
     for command in env:
         try:
             yield self.environment_modifications_formats[type(
                 command)].format(**command.args)
         except KeyError:
             message = 'Cannot handle command of type {command} : skipping request'  # NOQA: ignore=E501
             details = '{context} at {filename}:{lineno}'
             tty.warn(message.format(command=type(command)))
             tty.warn(details.format(**command.args))
Example #19
def _for_each_enabled(spec, method_name):
    """Calls a method for each enabled module"""
    for name in enabled:
        generator = spack.modules.module_types[name](spec)
        try:
            getattr(generator, method_name)()
        except RuntimeError as e:
            msg = 'cannot perform the requested {0} operation on module files'
            msg += ' [{1}]'
            tty.warn(msg.format(method_name, str(e)))
Example #20
def get_specs(force=False):
    """
    Get spec.yaml's for build caches available on mirror
    """
    global _cached_specs

    if _cached_specs:
        tty.debug("Using previously-retrieved specs")
        return _cached_specs

    mirrors = spack.config.get('mirrors')
    if len(mirrors) == 0:
        tty.warn("No Spack mirrors are currently configured")
        return {}

    path = str(spack.architecture.sys_type())
    urls = set()
    for key in mirrors:
        url = mirrors[key]
        if url.startswith('file'):
            mirror = url.replace('file://', '') + '/build_cache'
            tty.msg("Finding buildcaches in %s" % mirror)
            if os.path.exists(mirror):
                files = os.listdir(mirror)
                for file in files:
                    if re.search('spec.yaml', file):
                        link = 'file://' + mirror + '/' + file
                        urls.add(link)
        else:
            tty.msg("Finding buildcaches on %s" % url)
            p, links = spider(url + "/build_cache")
            for link in links:
                if re.search("spec.yaml", link) and re.search(path, link):
                    urls.add(link)

    _cached_specs = set()
    for link in urls:
        with Stage(link, name="build_cache", keep=True) as stage:
            if force and os.path.exists(stage.save_filename):
                os.remove(stage.save_filename)
            if not os.path.exists(stage.save_filename):
                try:
                    stage.fetch()
                except fs.FetchError:
                    continue
            with open(stage.save_filename, 'r') as f:
                # read the spec from the build cache file. All specs
                # in build caches are concrete (as they are built) so
                # we need to mark this spec concrete on read-in.
                spec = spack.spec.Spec.from_yaml(f)
                spec._mark_concrete()
                _cached_specs.add(spec)

    return _cached_specs
Example #21
 def _check_no_ext_conflicts(self, spec):
     """
         Check that there is no extension conflict for specs.
     """
     extendee = spec.package.extendee_spec
     try:
         self.extensions_layout.check_extension_conflict(extendee, spec)
     except ExtensionAlreadyInstalledError:
         # we print the warning here because later on the order in which
         # packages get activated is not clear (set-sorting)
         tty.warn(self._croot +
                  'Skipping already activated package: %s' % spec.name)
Example #22
 def check(self):
     """Check the downloaded archive against a checksum digest.
        No-op if this stage checks code out of a repository."""
     if self.fetcher is not self.default_fetcher and self.skip_checksum_for_mirror:
         tty.warn("Fetching from mirror without a checksum!",
                  "This package is normally checked out from a version "
                  "control system, but it has been archived on a spack "
                  "mirror.  This means we cannot know a checksum for the "
                  "tarball in advance. Be sure that your connection to "
                  "this mirror is secure!.")
     else:
         self.fetcher.check()
Example #23
    def _add(self, spec, directory_layout=None, explicit=False):
        """Add an install record for this spec to the database.

        Assumes spec is installed in ``layout.path_for_spec(spec)``.

        Also ensures dependencies are present and updated in the DB as
        either installed or missing.

        """
        if not spec.concrete:
            raise NonConcreteSpecAddError(
                "Specs added to DB must be concrete.")

        for dep in spec.dependencies(_tracked_deps):
            dkey = dep.dag_hash()
            if dkey not in self._data:
                self._add(dep, directory_layout, explicit=False)

        key = spec.dag_hash()
        if key not in self._data:
            installed = bool(spec.external)
            path = None
            if not spec.external and directory_layout:
                path = directory_layout.path_for_spec(spec)
                try:
                    directory_layout.check_installed(spec)
                    installed = True
                except DirectoryLayoutError as e:
                    tty.warn(
                        'Dependency missing due to corrupt install directory:',
                        path, str(e))

            # Create a new install record with no deps initially.
            new_spec = spec.copy(deps=False)
            self._data[key] = InstallRecord(
                new_spec, path, installed, ref_count=0, explicit=explicit)

            # Connect dependencies from the DB to the new copy.
            for name, dep in iteritems(spec.dependencies_dict(_tracked_deps)):
                dkey = dep.spec.dag_hash()
                new_spec._add_dependency(self._data[dkey].spec, dep.deptypes)
                self._data[dkey].ref_count += 1

            # Mark concrete once everything is built, and preserve
            # the original hash of concrete specs.
            new_spec._mark_concrete()
            new_spec._hash = key

        else:
            # If it is already there, mark it as installed.
            self._data[key].installed = True

        self._data[key].explicit = explicit
Example #24
def add_single_spec(spec, mirror_root, categories, **kwargs):
    tty.msg("Adding package {pkg} to mirror".format(pkg=spec.format("$_$@")))
    spec_exists_in_mirror = True
    try:
        with spec.package.stage:
            # fetcher = stage.fetcher
            # fetcher.fetch()
            # ...
            # fetcher.archive(archive_path)
            for ii, stage in enumerate(spec.package.stage):
                fetcher = stage.fetcher
                if ii == 0:
                    # create a subdirectory for the current package@version
                    archive_path = os.path.abspath(join_path(
                        mirror_root, mirror_archive_path(spec, fetcher)))
                    name = spec.format("$_$@")
                else:
                    resource = stage.resource
                    archive_path = os.path.abspath(join_path(
                        mirror_root,
                        mirror_archive_path(spec, fetcher, resource.name)))
                    name = "{resource} ({pkg}).".format(
                        resource=resource.name, pkg=spec.format("$_$@"))
                subdir = os.path.dirname(archive_path)
                mkdirp(subdir)

                if os.path.exists(archive_path):
                    tty.msg("{name} : already added".format(name=name))
                else:
                    spec_exists_in_mirror = False
                    fetcher.fetch()
                    if not kwargs.get('no_checksum', False):
                        fetcher.check()
                        tty.msg("{name} : checksum passed".format(name=name))

                    # Fetchers have to know how to archive their files.  Use
                    # that to move/copy/create an archive in the mirror.
                    fetcher.archive(archive_path)
                    tty.msg("{name} : added".format(name=name))

        if spec_exists_in_mirror:
            categories['present'].append(spec)
        else:
            categories['mirrored'].append(spec)

    except Exception as e:
        if spack.debug:
            sys.excepthook(*sys.exc_info())
        else:
            tty.warn("Error while fetching %s"
                     % spec.format('$_$@'), e.message)
        categories['error'].append(spec)
Example #25
def get_config(section, scope=None):
    """Get configuration settings for a section.

    If ``scope`` is ``None`` or not provided, return the merged contents
    of all of Spack's configuration scopes.  If ``scope`` is provided,
    return only the configuration as specified in that scope.

    This strips off the top-level name from the YAML section.  That is, for a
    YAML config file that looks like this::

       config:
         install_tree: $spack/opt/spack
         module_roots:
           lmod:   $spack/share/spack/lmod

    ``get_config('config')`` will return::

       { 'install_tree': '$spack/opt/spack',
         'module_roots': {
             'lmod': '$spack/share/spack/lmod'
         }
       }

    """
    validate_section_name(section)
    merged_section = syaml.syaml_dict()

    if scope is None:
        scopes = config_scopes.values()
    else:
        scopes = [validate_scope(scope)]

    for scope in scopes:
        # read potentially cached data from the scope.
        data = scope.get_section(section)

        # Skip empty configs
        if not data or not isinstance(data, dict):
            continue

        if section not in data:
            tty.warn("Skipping bad configuration file: '%s'" % scope.path)
            continue

        merged_section = _merge_yaml(merged_section, data)

    # no config files -- empty config.
    if section not in merged_section:
        return {}

    # take the top key off before returning.
    return merged_section[section]
Example #26
    def setup_environment(self, spack_env, run_env):
        spec = self.spec

        # TODO: The '--no-user-cfg' option for Python installation is only in
        # Python v2.7 and v3.4+ (see https://bugs.python.org/issue1180) and
        # adding support for ignoring user configuration will require
        # significant changes to this package for other Python versions.
        if not spec.satisfies('@2.7,3.4:'):
            tty.warn(('Python v{0} may not install properly if Python '
                      'user configurations are present.').format(self.version))

        # Need this to allow python build to find the Python installation.
        spack_env.set('MACOSX_DEPLOYMENT_TARGET', platform.mac_ver()[0])
Example #27
    def remove_specs(self, *specs, **kwargs):
        assert all((s.concrete for s in specs))
        with_dependents = kwargs.get("with_dependents", True)
        with_dependencies = kwargs.get("with_dependencies", False)

        specs = set(specs)

        if with_dependencies:
            specs = get_dependencies(specs)

        if kwargs.get("exclude", None):
            specs = set(filter_exclude(specs, kwargs["exclude"]))

        all_specs = set(self.get_all_specs())

        to_deactivate = specs
        to_keep = all_specs - to_deactivate

        dependents = find_dependents(to_keep, to_deactivate)

        if with_dependents:
            # remove all packages depending on the ones to remove
            if len(dependents) > 0:
                tty.warn(self._croot +
                         "The following dependents will be removed: %s"
                         % ", ".join((s.name for s in dependents)))
                to_deactivate.update(dependents)
        elif len(dependents) > 0:
            tty.warn(self._croot +
                     "The following packages will be unusable: %s"
                     % ", ".join((s.name for s in dependents)))

        extensions = set(filter(lambda s: s.package.is_extension,
                         to_deactivate))
        standalones = to_deactivate - extensions

        # Please note that a traversal of the DAG in post-order and then
        # forcibly removing each package should remove the need to specify
        # with_dependents for deactivating extensions/allow removal without
        # additional checks (force=True). If removal performance becomes
        # unbearable for whatever reason, this should be the first point of
        # attack.
        #
        # see: https://github.com/spack/spack/pull/3227#discussion_r117147475
        remove_extension = ft.partial(self.remove_extension,
                                      with_dependents=with_dependents)

        set(map(remove_extension, extensions))
        set(map(self.remove_standalone, standalones))

        self.purge_empty_directories()
Example #28
 def autoreconf(self, spec, prefix):
     """Not needed usually, configure should be already there"""
     # If configure exists nothing needs to be done
     if os.path.exists(self.configure_abs_path):
         return
     # Else try to regenerate it
     autotools = ['m4', 'autoconf', 'automake', 'libtool']
     missing = [x for x in autotools if x not in spec]
     if missing:
         msg = 'Cannot generate configure: missing dependencies {0}'
         raise RuntimeError(msg.format(missing))
     tty.msg('Configure script not found: trying to generate it')
     tty.warn('*********************************************************')
     tty.warn('* If the default procedure fails, consider implementing *')
     tty.warn('*        a custom AUTORECONF phase in the package       *')
     tty.warn('*********************************************************')
     with working_dir(self.configure_directory):
         m = inspect.getmodule(self)
         # This part should be redundant in principle, but
         # won't hurt
         m.libtoolize()
         m.aclocal()
         # This line is what is needed most of the time
         # --install, --verbose, --force
         autoreconf_args = ['-ivf']
         if 'pkg-config' in spec:
             autoreconf_args += [
                 '-I',
                 join_path(spec['pkg-config'].prefix, 'share', 'aclocal'),
             ]
         autoreconf_args += self.autoreconf_extra_args
         m.autoreconf(*autoreconf_args)
Example #29
    def remove_standalone(self, spec):
        """
            Remove (unlink) a standalone package from this view.
        """
        if not self.check_added(spec):
            tty.warn(self._croot +
                     'Skipping package not linked in view: %s' % spec.name)
            return

        self.unmerge(spec)
        self.unlink_meta_folder(spec)

        if self.verbose:
            tty.info(self._croot + 'Removed package: %s' % colorize_spec(spec))
Example #30
    def _create_new_cache(self):
        """Create a new cache for packages in a repo.

        The implementation here should try to minimize filesystem
        calls.  At the moment, it is O(number of packages) and makes
        about one stat call per package.  This is reasonably fast, and
        avoids actually importing packages in Spack, which is slow.
        """
        # Create a dictionary that will store the mapping between a
        # package name and its stat info
        cache = {}
        for pkg_name in os.listdir(self.packages_path):
            # Skip non-directories in the package root.
            pkg_dir = os.path.join(self.packages_path, pkg_name)

            # Warn about invalid names that look like packages.
            if not valid_module_name(pkg_name):
                msg = 'Skipping package at {0}. '
                msg += '"{1}" is not a valid Spack module name.'
                tty.warn(msg.format(pkg_dir, pkg_name))
                continue

            # Construct the file name from the directory
            pkg_file = os.path.join(
                self.packages_path, pkg_name, package_file_name
            )

            # Use stat here to avoid lots of calls to the filesystem.
            try:
                sinfo = os.stat(pkg_file)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # No package.py file here.
                    continue
                elif e.errno == errno.EACCES:
                    tty.warn("Can't read package file %s." % pkg_file)
                    continue
                raise e

            # If it's not a file, skip it.
            if stat.S_ISDIR(sinfo.st_mode):
                continue

            # If it is a file, then save the stats under the
            # appropriate key
            cache[pkg_name] = sinfo

        return cache
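The stat-per-package pattern, reduced to a standalone sketch (hypothetical
layout: one <name>/package.py per package):

    import os

    def stat_cache(packages_path):
        cache = {}
        for name in os.listdir(packages_path):
            pkg_file = os.path.join(packages_path, name, 'package.py')
            try:
                cache[name] = os.stat(pkg_file)   # one stat call per package
            except OSError:
                continue                          # no package.py here: skip
        return cache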
Example #31
    def test(self):
        config_args = self._generate_make_hdr_for_test()

        # Write configuration options to make.inc file
        make_file_inc = join_path(self.test_suite.current_test_cache_dir,
                                  self.make_hdr_file)
        with open(make_file_inc, 'w') as inc:
            for option in config_args:
                inc.write('{0}\n'.format(option))

        args = []
        if self.version < Version('5.2.2'):
            args.append('HEADER=' + self.prefix.include)
        args.append('superlu')

        test_dir = join_path(self.test_suite.current_test_cache_dir,
                             self.examples_src_dir)
        exe = 'superlu'

        if not os.path.isfile(join_path(test_dir, '{0}.c'.format(exe))):
        tty.warn('Skipping superlu test: missing file {0}.c'.format(exe))
            return

        self.run_superlu_test(test_dir, exe, args)
Example #32
    def dump_provenance(self, spec, path):
        """Dump provenance information for a spec to a particular path.

           This dumps the package file and any associated patch files.
           Raises UnknownPackageError if not found.
        """
        if spec.namespace and spec.namespace != self.namespace:
            raise UnknownPackageError(
                "Repository %s does not contain package %s." %
                (self.namespace, spec.fullname))

        # Install patch files needed by the package.
        fs.mkdirp(path)
        for patch in itertools.chain.from_iterable(
                spec.package.patches.values()):

            if patch.path:
                if os.path.exists(patch.path):
                    fs.install(patch.path, path)
                else:
                    tty.warn("Patch file did not exist: %s" % patch.path)

        # Install the package.py file itself.
        fs.install(self.filename_for_package_name(spec.name), path)
Example #33
    def remove_path(self, dirname):
        """Remove `dirname` from the modulepath

        Parameters
        ----------
        dirname : str
            The directory to remove

        Returns
        -------
        modules_in_dir : list of Module
            The modules in the directory that was removed

        """
        dirname = Path.expand_name(dirname)
        if dirname not in self:  # pragma: no cover
            tty.warn("Modulepath: {0!r} is not in modulepath".format(dirname))
            return []

        modules_in_dir = self.getby_dirname(dirname)
        self.path.pop(self.index(dirname))
        self.path_modified()

        return modules_in_dir
Example #34
    def add_standalone(self, spec):
        if spec.package.is_extension:
            tty.error(self._croot + 'Package %s is an extension.'
                      % spec.name)
            return False

        if spec.external:
            tty.warn(self._croot + 'Skipping external package: %s'
                     % colorize_spec(spec))
            return True

        if self.check_added(spec):
            tty.warn(self._croot + 'Skipping already linked package: %s'
                     % colorize_spec(spec))
            return True

        if spec.package.extendable:
            # Check for globally activated extensions in the extendee that
            # we're looking at.
            activated = [p.spec for p in
                         spack.store.db.activated_extensions_for(spec)]
            if activated:
                tty.error("Globally activated extensions cannot be used in "
                          "conjunction with filesystem views. "
                          "Please deactivate the following specs: ")
                spack.cmd.display_specs(activated, flags=True, variants=True,
                                        long=False)
                return False

        self.merge(spec)

        self.link_meta_folder(spec)

        if self.verbose:
            tty.info(self._croot + 'Linked package: %s' % colorize_spec(spec))
        return True
Example #35
def create_stage_root(path):
    # type: (str) -> None
    """Create the stage root directory and ensure appropriate access perms."""
    assert os.path.isabs(path) and len(path.strip()) > 1

    err_msg = 'Cannot create stage root {0}: Access to {1} is denied'

    user_uid = getuid()

    # Obtain lists of ancestor and descendant paths of the $user node, if any.
    group_paths, user_node, user_paths = partition_path(
        path, getpass.getuser())

    for p in group_paths:
        if not os.path.exists(p):
            # Ensure access controls of subdirs created above `$user` inherit
            # from the parent and share the group.
            par_stat = os.stat(os.path.dirname(p))
            mkdirp(p, group=par_stat.st_gid, mode=par_stat.st_mode)

            p_stat = os.stat(p)
            if par_stat.st_gid != p_stat.st_gid:
                tty.warn(
                    "Expected {0} to have group {1}, but it is {2}".format(
                        p, par_stat.st_gid, p_stat.st_gid))

            if par_stat.st_mode & p_stat.st_mode != par_stat.st_mode:
                tty.warn(
                    "Expected {0} to support mode {1}, but it is {2}".format(
                        p, par_stat.st_mode, p_stat.st_mode))

            if not can_access(p):
                raise OSError(errno.EACCES, err_msg.format(path, p))

    # Add the path ending with the $user node to the user paths to ensure paths
    # from $user (on down) meet the ownership and permission requirements.
    if user_node:
        user_paths.insert(0, user_node)

    for p in user_paths:
        # Ensure access controls of subdirs from `$user` on down are
        # restricted to the user.
        owner_uid = get_owner_uid(p)
        if user_uid != owner_uid:
            tty.warn(
                "Expected user {0} to own {1}, but it is owned by {2}".format(
                    user_uid, p, owner_uid))

    spack_src_subdir = os.path.join(path, _source_path_subdir)
    # When staging into a user-specified directory with `spack stage -p <PATH>`, we need
    # to ensure the `spack-src` subdirectory exists, as we can't rely on it being
    # created automatically by spack. It's not clear why this is the case for `spack
    # stage -p`, but since `mkdirp()` is idempotent, this should not change the behavior
    # for any other code paths.
    if not os.path.isdir(spack_src_subdir):
        mkdirp(spack_src_subdir, mode=stat.S_IRWXU)
Example #36
def relocate_links(links, orig_layout_root, orig_install_prefix,
                   new_install_prefix):
    """Relocate links to a new install prefix.

    The symbolic links are relative to the original installation prefix.
    The old link target is read and the placeholder is replaced by the old
    layout root. If the old link target is in the old install prefix, the new
    link target is created by replacing the old install prefix with the new
    install prefix.

    Args:
        links (list): list of links to be relocated
        orig_layout_root (str): original layout root
        orig_install_prefix (str): install prefix of the original installation
        new_install_prefix (str): install prefix where we want to relocate
    """
    placeholder = _placeholder(orig_layout_root)
    abs_links = [os.path.join(new_install_prefix, link) for link in links]
    for abs_link in abs_links:
        link_target = os.readlink(abs_link)
        link_target = re.sub(placeholder, orig_layout_root, link_target)
        # If the link points to a file in the original install prefix,
        # compute the corresponding target in the new prefix and relink
        if link_target.startswith(orig_install_prefix):
            link_target = re.sub(orig_install_prefix, new_install_prefix,
                                 link_target)
            os.unlink(abs_link)
            os.symlink(link_target, abs_link)

        # If the link is absolute and has not been relocated then
        # warn the user about that
        if (os.path.isabs(link_target)
                and not link_target.startswith(new_install_prefix)):
            msg = ('Link target "{0}" for symbolic link "{1}" is outside'
                   ' of the new install prefix {2}')
            tty.warn(msg.format(link_target, abs_link, new_install_prefix))
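The relink step in isolation (paths are illustrative):

    import os

    link = '/new/prefix/bin/tool'
    target = os.readlink(link)                  # e.g. '/old/prefix/lib/libx.so'
    if target.startswith('/old/prefix'):
        target = target.replace('/old/prefix', '/new/prefix', 1)
        os.unlink(link)                         # symlinks cannot be edited in
        os.symlink(target, link)                # place: remove and recreate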
Example #37
def _elf_rpaths_for(path):
    """Return the RPATHs for an executable or a library.

    The RPATHs are obtained by ``patchelf --print-rpath PATH``.

    Args:
        path (str): full path to the executable or library

    Return:
        RPATHs as a list of strings.
    """
    # If we're relocating patchelf itself, use it
    patchelf_path = path if path.endswith("/bin/patchelf") else _patchelf()
    patchelf = executable.Executable(patchelf_path)

    output = ''
    try:
        output = patchelf('--print-rpath', path, output=str, error=str)
        output = output.strip('\n')
    except executable.ProcessError as e:
        msg = 'patchelf --print-rpath {0} produced an error [{1}]'
        tty.warn(msg.format(path, str(e)))

    return output.split(':') if output else []
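Outside Spack's Executable wrapper, the same query can be made with the
standard library (assumes a patchelf binary on PATH):

    import subprocess

    def print_rpath(path):
        proc = subprocess.run(['patchelf', '--print-rpath', path],
                              capture_output=True, text=True)
        out = proc.stdout.strip()
        return out.split(':') if out else []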
Example #38
def _set_elf_rpaths(target, rpaths):
    """Replace the original RPATH of the target with the paths passed
    as arguments.

    This function uses ``patchelf`` to set RPATHs.

    Args:
        target: target executable. Must be an ELF object.
        rpaths: paths to be set in the RPATH

    Returns:
        A string concatenating the stdout and stderr of the call
        to ``patchelf``
    """
    # Join the paths using ':' as a separator
    rpaths_str = ':'.join(rpaths)

    # If we're relocating patchelf itself, make a copy and use it
    bak_path = None
    if target.endswith("/bin/patchelf"):
        bak_path = target + ".bak"
        shutil.copy(target, bak_path)

    patchelf, output = executable.Executable(bak_path or _patchelf()), None
    try:
        # TODO: revisit the use of --force-rpath as it might be conditional
        # TODO: if we want to support setting RUNPATH from binary packages
        patchelf_args = ['--force-rpath', '--set-rpath', rpaths_str, target]
        output = patchelf(*patchelf_args, output=str, error=str)
    except executable.ProcessError as e:
        msg = 'patchelf --force-rpath --set-rpath {0} failed with error {1}'
        tty.warn(msg.format(target, e))
    finally:
        if bak_path and os.path.exists(bak_path):
            os.remove(bak_path)
    return output
Example #39
def get_existing_elf_rpaths(path_name):
    """
    Return the RPATHS returned by patchelf --print-rpath path_name
    as a list of strings.
    """

    # if we're relocating patchelf itself, use it

    if path_name.endswith("/bin/patchelf"):
        patchelf = Executable(path_name)
    else:
        patchelf = Executable(get_patchelf())

    rpaths = list()
    try:
        output = patchelf('--print-rpath',
                          '%s' % path_name,
                          output=str,
                          error=str)
        rpaths = output.rstrip('\n').split(':')
    except ProcessError as e:
        msg = 'patchelf --print-rpath %s produced an error %s' % (path_name, e)
        tty.warn(msg)
    return rpaths
Example #40
    def caveats(self):
        perm_script = 'spack_perms_fix.sh'
        perm_script_path = join_path(self.spec.prefix.bin, perm_script)
        with open(perm_script_path, 'w') as f:
            env = spack.tengine.make_environment(dirs=self.package_dir)
            t = env.get_template(perm_script + '.j2')
            f.write(t.render({'prefix': self.spec.prefix}))
        chmod = which('chmod')
        chmod('0555', perm_script_path)

        tty.warn("""
For a working GaussianView installation, all executable files must be accessible
only by the owner and the group, not by the world.

We've installed a script that will make the necessary changes;
read through it and then execute it:

    {0}

If you have to give others access, please customize the group membership of the package
files as documented here:

    https://spack.readthedocs.io/en/latest/build_settings.html#package-permissions"""
                 .format(perm_script_path))
Example #41
    def remove_files(self, files):
        def needs_file(spec, file):
            # convert the file we want to remove to a source in this spec
            projection = self.get_projection_for_spec(spec)
            relative_path = os.path.relpath(file, projection)
            test_path = os.path.join(spec.prefix, relative_path)

            # check if this spec owns a file of that name (through the
            # manifest in the metadata dir, which we have in the view).
            manifest_file = os.path.join(self.get_path_meta_folder(spec),
                                         spack.store.layout.manifest_file_name)
            try:
                with open(manifest_file, 'r') as f:
                    manifest = s_json.load(f)
            except (OSError, IOError):
                # if we can't load it, assume it doesn't know about the file.
                manifest = {}
            return test_path in manifest

        specs = self.get_all_specs()

        for file in files:
            if not os.path.lexists(file):
                tty.warn("Tried to remove %s which does not exist" % file)
                continue

            # remove if file is not owned by any other package in the view
            # This will only be false if two packages are merged into a prefix
            # and have a conflicting file

            # check all specs for whether they own the file. That includes the spec
            # we are currently removing, as we remove files before unlinking the
            # metadata directory.
            if len([s for s in specs if needs_file(s, file)]) <= 1:
                tty.debug("Removing file " + file)
                os.remove(file)
Example #42
def setup_main_options(args):
    """Configure spack globals based on the basic options."""
    # Assign a custom function to show warnings
    warnings.showwarning = send_warning_to_tty

    # Set up environment based on args.
    tty.set_verbose(args.verbose)
    tty.set_debug(args.debug)
    tty.set_stacktrace(args.stacktrace)

    # debug must be set first so that it can even affect behavior of
    # errors raised by spack.config.
    if args.debug:
        spack.error.debug = True
        spack.util.debug.register_interrupt_handler()
        spack.config.set('config:debug', True, scope='command_line')

    if args.timestamp:
        tty.set_timestamp(True)

    # override lock configuration if passed on command line
    if args.locks is not None:
        spack.util.lock.check_lock_safety(spack.paths.prefix)
        spack.config.set('config:locks', False, scope='command_line')

    if args.mock:
        rp = spack.repo.RepoPath(spack.paths.mock_packages_path)
        spack.repo.set_path(rp)

    # If the user asked for it, don't check ssl certs.
    if args.insecure:
        tty.warn("You asked for --insecure. Will NOT check SSL certificates.")
        spack.config.set('config:verify_ssl', False, scope='command_line')

    # when to use color (takes always, auto, or never)
    color.set_color_when(args.color)
Example #43
def _for_each_enabled(spec, method_name):
    """Calls a method for each enabled module"""
    set_names = set(spack.config.get('modules', {}).keys())
    # If we have old-style modules enabled, we put those in the default set
    old_default_enabled = spack.config.get('modules:enable')
    if old_default_enabled:
        set_names.add('default')
    for name in set_names:
        enabled = spack.config.get('modules:%s:enable' % name)
        if name == 'default':
            # combine enabled modules from default and old format
            enabled = spack.config.merge_yaml(old_default_enabled,  enabled)
        if not enabled:
            tty.debug('NO MODULE WRITTEN: list of enabled module files is empty')
            continue

        for type in enabled:
            generator = spack.modules.module_types[type](spec, name)
            try:
                getattr(generator, method_name)()
            except RuntimeError as e:
                msg = 'cannot perform the requested {0} operation on module files'
                msg += ' [{1}]'
                tty.warn(msg.format(method_name, str(e)))
Example #44
    def configure_args(self):
        spec = self.spec

        args = [
            '--enable-ndb', '--disable-inhibit-plugin',
            '--with-crypto={0}'.format(spec.variants['crypto'].value)
        ]

        args += self.enable_or_disable('nls')
        args += self.enable_or_disable('sqlite')
        args += self.with_or_without('selinux')
        args += self.with_or_without('python')
        # OpenMP multithreading support is enabled automatically if the C
        # compiler supports OpenMP 4.5 or higher
        args += self.enable_or_disable('openmp')

        # Option got removed in 4.17
        if self.spec.satisfies('@:4.16'):
            args += self.with_or_without('lua')

        # Legacy Berkeley DB support
        if 'berkeley-db=full' in spec:
            args.extend(['--enable-bdb', '--disable-bdb-ro'])
        elif 'berkeley-db=readonly' in spec:
            args.extend(['--disable-bdb', '--enable-bdb-ro'])
        else:
            args.extend(['--disable-bdb', '--disable-bdb-ro'])

        # enable POSIX.1e draft 15 file capabilities support
        if '+posix' in spec:
            args.append('--with-cap')

        if 'crypto=openssl' in spec:
            tty.warn(openssl_warning)

        return args
Example #45
    def add_standalone(self, spec):
        if spec.package.is_extension:
            tty.error(self._croot + 'Package %s is an extension.' % spec.name)
            return False

        if spec.external:
            tty.warn(self._croot +
                     'Skipping external package: %s' % colorize_spec(spec))
            return True

        if self.check_added(spec):
            tty.warn(self._croot + 'Skipping already linked package: %s' %
                     colorize_spec(spec))
            return True

        if spec.package.extendable:
            # Check for globally activated extensions in the extendee that
            # we're looking at.
            activated = [
                p.spec for p in spack.store.db.activated_extensions_for(spec)
            ]
            if activated:
                tty.error("Globally activated extensions cannot be used in "
                          "conjunction with filesystem views. "
                          "Please deactivate the following specs: ")
                spack.cmd.display_specs(activated,
                                        flags=True,
                                        variants=True,
                                        long=False)
                return False

        tree = LinkTree(spec.prefix)

        if not self.ignore_conflicts:
            conflict = tree.find_conflict(self.root)
            if conflict is not None:
                tty.error(self._croot +
                          "Cannot link package %s, file already exists: %s" %
                          (spec.name, conflict))
                return False

        conflicts = tree.merge(self.root,
                               link=self.link,
                               ignore=ignore_metadata_dir,
                               ignore_conflicts=self.ignore_conflicts)
        self.link_meta_folder(spec)

        if self.ignore_conflicts:
            for c in conflicts:
                tty.warn(self._croot + "Could not link: %s" % c)

        if self.verbose:
            tty.info(self._croot + 'Linked package: %s' % colorize_spec(spec))
        return True
Example #46
def _create_stage_root(path):
    """Create the stage root directory and ensure appropriate access perms."""
    assert path.startswith(os.path.sep) and len(path.strip()) > 1

    err_msg = 'Cannot create stage root {0}: Access to {1} is denied'

    user_uid = os.getuid()

    # Obtain lists of ancestor and descendant paths of the $user node, if any.
    group_paths, user_node, user_paths = partition_path(path,
                                                        getpass.getuser())

    for p in group_paths:
        if not os.path.exists(p):
            # Ensure access controls of subdirs created above `$user` inherit
            # from the parent and share the group.
            par_stat = os.stat(os.path.dirname(p))
            mkdirp(p, group=par_stat.st_gid, mode=par_stat.st_mode)

            p_stat = os.stat(p)
            if par_stat.st_gid != p_stat.st_gid:
                tty.warn("Expected {0} to have group {1}, but it is {2}"
                         .format(p, par_stat.st_gid, p_stat.st_gid))

            if par_stat.st_mode & p_stat.st_mode != par_stat.st_mode:
                tty.warn("Expected {0} to support mode {1}, but it is {2}"
                         .format(p, par_stat.st_mode, p_stat.st_mode))

            if not can_access(p):
                raise OSError(errno.EACCES, err_msg.format(path, p))

    # Add the path ending with the $user node to the user paths to ensure paths
    # from $user (on down) meet the ownership and permission requirements.
    if user_node:
        user_paths.insert(0, user_node)

    for p in user_paths:
        # Ensure access controls of subdirs from `$user` on down are
        # restricted to the user.
        if not os.path.exists(p):
            mkdirp(p, mode=stat.S_IRWXU)

            p_stat = os.stat(p)
            if p_stat.st_mode & stat.S_IRWXU != stat.S_IRWXU:
                tty.error("Expected {0} to support mode {1}, but it is {2}"
                          .format(p, stat.S_IRWXU, p_stat.st_mode))

                raise OSError(errno.EACCES, err_msg.format(path, p))
        else:
            p_stat = os.stat(p)

        if user_uid != p_stat.st_uid:
            tty.warn("Expected user {0} to own {1}, but it is owned by {2}"
                     .format(user_uid, p, p_stat.st_uid))
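
The loop above depends on partition_path splitting the chain of path prefixes around the $user component. A hedged, self-contained sketch of that assumed contract (the real helper lives in Spack's filesystem utilities and may differ in detail):

import os

def partition_path(path, entry):
    """Toy sketch: split the prefixes of `path` around the first
    component equal to `entry`."""
    parts = path.strip(os.sep).split(os.sep)
    prefixes = [os.sep + os.sep.join(parts[:i + 1])
                for i in range(len(parts))]
    for i, part in enumerate(parts):
        if part == entry:
            return prefixes[:i], prefixes[i], prefixes[i + 1:]
    return prefixes, None, []

# partition_path('/tmp/alice/spack-stage', 'alice')
# -> (['/tmp'], '/tmp/alice', ['/tmp/alice/spack-stage'])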
Example #47
def md5(parser, args):
    if not args.files:
        setup_parser.parser.print_help()
        return 1

    results = []
    for url in args.files:
        try:
            checksum = compute_md5_checksum(url)
            results.append((checksum, url))
        except FailedDownloadError as e:
            tty.warn("Failed to fetch %s" % url)
            tty.warn("%s" % e)
        except IOError as e:
            tty.warn("Error when reading %s" % url)
            tty.warn("%s" % e)

    # Dump the MD5s last, without interleaving them with downloads
    tty.msg("%d MD5 checksums:" % len(results))
    for checksum, url in results:
        print "%s  %s" % (checksum, url)
Example #48
def detect_scheduler():
    if os.environ.get('CROSS') is None:
        srunbin = which('srun')
        if srunbin is None:
            aprunbin = which('aprun')
            if aprunbin is None:
                tty.warn("CROSS has not been set and no scheduler "
                         "could be detected.")
                return 'none'
            else:
                tty.warn("CROSS has not been set, but aprun was found; "
                         "assuming the ALPS scheduler.")
                return 'alps'
        else:
            tty.warn("CROSS has not been set, but srun was found; "
                     "assuming the SLURM scheduler.")
            return 'slurm'
    else:
        tty.warn("CROSS has been set to %s by the user." %
                 os.environ.get('CROSS'))
        return 'user'
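
detect_scheduler leans on `which` returning a falsy value when a binary is absent. A self-contained sketch of that assumed contract; Spack's real helper returns an Executable wrapper rather than a plain path:

import os

def which(name):
    """Return the first executable `name` found on PATH, else None."""
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None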
Example #49
def do_checksum(parser, args, algo):
    if not args.files:
        setup_parser.parser.print_help()
        return 1

    urls = list(normalized(args.files))
    results = []
    for url in urls:
        try:
            checksum = compute_checksum(url, algo)
            results.append((checksum, url))
        except FailedDownloadError as e:
            tty.warn("Failed to fetch %s" % url)
            tty.warn("%s" % e)
        except IOError as e:
            tty.warn("Error when reading %s" % url)
            tty.warn("%s" % e)

    # Dump the hashes last, without interleaving them with downloads
    noun = 'checksum' if len(results) == 1 else 'checksums'
    tty.msg("%d %s %s:" % (len(results), algo, noun))
    for checksum, url in results:
        print("{0}  {1}".format(checksum, url))
Example #50
    def autoreconf(self, spec, prefix):
        """Not needed usually; the configure script should already be there."""
        # If configure exists nothing needs to be done
        if os.path.exists(self.configure_abs_path):
            return
        # Else try to regenerate it
        autotools = ['m4', 'autoconf', 'automake', 'libtool']
        missing = [x for x in autotools if x not in spec]
        if missing:
            msg = 'Cannot generate configure: missing dependencies {0}'
            raise RuntimeError(msg.format(missing))
        tty.msg('Configure script not found: trying to generate it')
        tty.warn('*********************************************************')
        tty.warn('* If the default procedure fails, consider implementing *')
        tty.warn('*        a custom AUTORECONF phase in the package       *')
        tty.warn('*********************************************************')
        with working_dir(self.configure_directory):
            m = inspect.getmodule(self)
            # --install, --verbose, --force: what is needed most of the time
            autoreconf_args = ['-ivf']
            autoreconf_args += self.autoreconf_search_path_args
            autoreconf_args += self.autoreconf_extra_args
            m.autoreconf(*autoreconf_args)
Example #51
def set_up_license(pkg):
    """Prompt the user, letting them know that a license is required.

    For packages that rely on license files, a global license file is
    created and opened for editing.

    For packages that rely on environment variables to point to a
    license, a warning message is printed.

    For all other packages, documentation on how to set up a license
    is printed."""

    # If the license can be stored in a file, create one
    if pkg.license_files:
        license_path = pkg.global_license_file
        if not os.path.exists(license_path):
            # Create a new license file
            write_license_file(pkg, license_path)
            # Open up file in user's favorite $EDITOR for editing
            spack.editor(license_path)
            tty.msg("Added global license file %s" % license_path)
        else:
            # Use already existing license file
            tty.msg("Found already existing license %s" % license_path)

    # If not a file, what about an environment variable?
    elif pkg.license_vars:
        tty.warn("A license is required to use %s. Please set %s to the "
                 "full pathname to the license file, or port@host if you"
                 " store your license keys on a dedicated license server" %
                 (pkg.name, ' or '.join(pkg.license_vars)))

    # If not a file or variable, suggest a website for further info
    elif pkg.license_url:
        tty.warn("A license is required to use %s. See %s for details" %
                 (pkg.name, pkg.license_url))

    # If all else fails, you're on your own
    else:
        tty.warn("A license is required to use %s" % pkg.name)
Example #52
def get_origin_info(remote):
    git_dir = os.path.join(spack.paths.prefix, '.git')
    git = which('git', required=True)
    try:
        branch = git('symbolic-ref', '--short', 'HEAD', output=str)
    except ProcessError:
        branch = 'develop'
        tty.warn('No branch found; using default branch: %s' % branch)
    if remote == 'origin' and \
       branch not in ('master', 'develop'):
        branch = 'develop'
        tty.warn('Unknown branch found; using default branch: %s' % branch)
    try:
        origin_url = git(
            '--git-dir=%s' % git_dir,
            'config', '--get', 'remote.%s.url' % remote,
            output=str)
    except ProcessError:
        origin_url = _SPACK_UPSTREAM
        tty.warn('No git repository found; '
                 'using default upstream URL: %s' % origin_url)
    return (origin_url.strip(), branch.strip())
Example #53
def install_tarball(spec, args):
    s = spack.spec.Spec(spec)
    if s.external or s.virtual:
        tty.warn("Skipping external or virtual package %s" % spec.format())
        return
    yes_to_all = bool(args.yes_to_all)
    force = bool(args.force)
    for d in s.dependencies(deptype=('link', 'run')):
        tty.msg("Installing buildcache for dependency spec %s" % d)
        install_tarball(d, args)
    package = spack.repo.get(spec)
    if s.concrete and package.installed and not force:
        tty.warn("Package for spec %s already installed." % spec.format(),
                 " Use -f flag to overwrite.")
    else:
        tarball = bindist.download_tarball(spec)
        if tarball:
            tty.msg('Installing buildcache for spec %s' % spec.format())
            try:
                bindist.extract_tarball(spec, tarball, yes_to_all, force)
            except NoOverwriteException as e:
                tty.warn("%s exists. use -f to force overwrite." % e.args)
            except NoVerifyException:
                tty.die("Package spec file failed signature verification,"
                        " use -y flag to install build cache")
            except NoChecksumException:
                tty.die("Package tarball failed checksum verification,"
                        " use -y flag to install build cache")
            finally:
                spack.store.db.reindex(spack.store.layout)
        else:
            tty.die('Download of binary cache file for spec %s failed.' %
                    spec.format())
Example #54
    def __call__(self, *args, **kwargs):
        """Run this executable in a subprocess.

        Parameters:
            *args (str): Command-line arguments to the executable to run

        Keyword Arguments:
            _dump_env (dict): Dict to be set to the environment actually
                used (envisaged for testing purposes only)
            env (dict or EnvironmentModifications): The environment with which
                to run the executable
            extra_env (dict or EnvironmentModifications): Extra items to add to
                the environment (neither requires nor precludes env)
            fail_on_error (bool): Raise an exception if the subprocess returns
                an error. Default is True. The return code is available as
                ``exe.returncode``
            ignore_errors (int or list): A list of error codes to ignore.
                If these codes are returned, this process will not raise
                an exception even if ``fail_on_error`` is set to ``True``
            ignore_quotes (bool): If False, warn users that quotes are not needed
                as Spack does not use a shell. Defaults to False.
            input: Where to read stdin from
            output: Where to send stdout
            error: Where to send stderr

        Accepted values for input, output, and error:

        * python streams, e.g. open Python file objects, or ``os.devnull``
        * filenames, which will be automatically opened for writing
        * ``str``, as in the Python string type. If you set these to ``str``,
          output and error will be written to pipes and returned as a string.
          If both ``output`` and ``error`` are set to ``str``, then one string
          is returned containing output concatenated with error. Not valid
          for ``input``
        * ``str.split``, as in the ``split`` method of the Python string type.
          Behaves the same as ``str``, except that value is also written to
          ``stdout`` or ``stderr``.

        By default, the subprocess inherits the parent's file descriptors.

        """
        # Environment
        env_arg = kwargs.get('env', None)

        # Setup default environment
        env = os.environ.copy() if env_arg is None else {}
        self.default_envmod.apply_modifications(env)
        env.update(self.default_env)

        from spack.util.environment import EnvironmentModifications  # no cycle

        # Apply env argument
        if isinstance(env_arg, EnvironmentModifications):
            env_arg.apply_modifications(env)
        elif env_arg:
            env.update(env_arg)

        # Apply extra env
        extra_env = kwargs.get('extra_env', {})
        if isinstance(extra_env, EnvironmentModifications):
            extra_env.apply_modifications(env)
        else:
            env.update(extra_env)

        if '_dump_env' in kwargs:
            kwargs['_dump_env'].clear()
            kwargs['_dump_env'].update(env)

        fail_on_error = kwargs.pop('fail_on_error', True)
        ignore_errors = kwargs.pop('ignore_errors', ())
        ignore_quotes = kwargs.pop('ignore_quotes', False)

        # If they just want to ignore one error code, make it a tuple.
        if isinstance(ignore_errors, int):
            ignore_errors = (ignore_errors, )

        input  = kwargs.pop('input',  None)
        output = kwargs.pop('output', None)
        error  = kwargs.pop('error',  None)

        if input is str:
            raise ValueError('Cannot use `str` as input stream.')

        def streamify(arg, mode):
            if isinstance(arg, string_types):
                return open(arg, mode), True
            elif arg in (str, str.split):
                return subprocess.PIPE, False
            else:
                return arg, False

        ostream, close_ostream = streamify(output, 'w')
        estream, close_estream = streamify(error,  'w')
        istream, close_istream = streamify(input,  'r')

        if not ignore_quotes:
            quoted_args = [arg for arg in args if re.search(r'^"|^\'|"$|\'$', arg)]
            if quoted_args:
                tty.warn(
                    "Quotes in command arguments can confuse scripts like"
                    " configure.",
                    "The following arguments may cause problems when executed:",
                    str("\n".join(["    " + arg for arg in quoted_args])),
                    "Quotes aren't needed because spack doesn't use a shell. "
                    "Consider removing them.",
                    "If multiple levels of quotation are required, use "
                    "`ignore_quotes=True`.")

        cmd = self.exe + list(args)

        cmd_line = "'%s'" % "' '".join(
            map(lambda arg: arg.replace("'", "'\"'\"'"), cmd))

        tty.debug(cmd_line)

        try:
            proc = subprocess.Popen(
                cmd,
                stdin=istream,
                stderr=estream,
                stdout=ostream,
                env=env)
            out, err = proc.communicate()

            result = None
            if output in (str, str.split) or error in (str, str.split):
                result = ''
                if output in (str, str.split):
                    outstr = text_type(out.decode('utf-8'))
                    result += outstr
                    if output is str.split:
                        sys.stdout.write(outstr)
                if error in (str, str.split):
                    errstr = text_type(err.decode('utf-8'))
                    result += errstr
                    if error is str.split:
                        sys.stderr.write(errstr)

            rc = self.returncode = proc.returncode
            if fail_on_error and rc != 0 and (rc not in ignore_errors):
                long_msg = cmd_line
                if result:
                    # If the output is not captured in the result, it will have
                    # been stored either in the specified files (e.g. if
                    # 'output' specifies a file) or written to the parent's
                    # stdout/stderr (e.g. if 'output' is not specified)
                    long_msg += '\n' + result

                raise ProcessError('Command exited with status %d:' %
                                   proc.returncode, long_msg)

            return result

        except OSError as e:
            raise ProcessError(
                '%s: %s' % (self.exe[0], e.strerror), 'Command: ' + cmd_line)

        except subprocess.CalledProcessError as e:
            if fail_on_error:
                raise ProcessError(
                    str(e), '\nExit status %d when invoking command: %s' %
                    (proc.returncode, cmd_line))

        finally:
            if close_ostream:
                ostream.close()
            if close_estream:
                estream.close()
            if close_istream:
                istream.close()
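
A usage sketch for the call interface documented above, assuming the usual spack.util.executable import location; `echo` stands in for any wrapped executable:

from spack.util.executable import which

echo = which('echo', required=True)
out = echo('hello', output=str)    # stdout is captured and returned
echo('hello', ignore_quotes=True)  # silence the quoting warning if needed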
Example #55
    def _save_distutil_vars(self):
        """
        Run before changing the automatically generated contents of
        _sysconfigdata.py, which distutils uses to figure out which
        executables to use while compiling and linking extensions. If we
        build extensions with Spack, those executables should be Spack's
        wrappers. Spack partially covers this by setting environment
        variables that distutils also honors. Currently there is one more
        known variable that must be set, LDSHARED, so this method saves
        its autogenerated value in order to pass it to the dependent
        package's setup script.
        """

        self._distutil_vars = {}

        input_filename = self.get_sysconfigdata_name()
        input_dict = None
        try:
            with open(input_filename) as input_file:
                match = re.search(r'build_time_vars\s*=\s*(?P<dict>{.*})',
                                  input_file.read(),
                                  flags=re.DOTALL)

                if match:
                    input_dict = ast.literal_eval(match.group('dict'))
        except (IOError, SyntaxError):
            pass

        if not input_dict:
            tty.warn("Failed to find 'build_time_vars' dictionary in file "
                     "'%s'. This might cause the extensions that are "
                     "installed with distutils to call compilers directly "
                     "avoiding Spack's wrappers." % input_filename)
            return

        for var_name in Python._DISTUTIL_VARS_TO_SAVE:
            if var_name in input_dict:
                self._distutil_vars[var_name] = input_dict[var_name]
            else:
                tty.warn("Failed to find key '%s' in 'build_time_vars' "
                         "dictionary in file '%s'. This might cause the "
                         "extensions that are installed with distutils to "
                         "call compilers directly avoiding Spack's wrappers."
                         % (var_name, input_filename))

        if self._distutil_vars:
            output_filename = None
            try:
                output_filename = join_path(
                    spack.store.layout.metadata_path(self.spec),
                    Python._DISTUTIL_CACHE_FILENAME)
                with open(output_filename, 'w') as output_file:
                    sjson.dump(self._distutil_vars, output_file)
            except Exception:
                tty.warn("Failed to save metadata for distutils. This might "
                         "cause the extensions that are installed with "
                         "distutils to call compilers directly avoiding "
                         "Spack's wrappers.")
                # We make the cache empty if we failed to save it to file
                # to provide the same behaviour as in the case when the cache
                # is initialized by the method load_distutils_data().
                self._distutil_vars = {}
                if output_filename:
                    force_remove(output_filename)
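
A self-contained illustration of the build_time_vars extraction performed above, using an inline stand-in for the contents of _sysconfigdata.py:

import ast
import re

text = "# system configuration\nbuild_time_vars = {'LDSHARED': 'gcc -shared'}\n"
match = re.search(r'build_time_vars\s*=\s*(?P<dict>{.*})', text,
                  flags=re.DOTALL)
build_time_vars = ast.literal_eval(match.group('dict'))
assert build_time_vars['LDSHARED'] == 'gcc -shared'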
Example #56
def flake8(parser, args):
    tty.warn("spack flake8 is deprecated",
             "please use `spack style` to run style checks")
    return spack.cmd.style.style(parser, args)
Example #57
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally. """
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(ci_project_dir,
                                          pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what is our temporary storage mirror: Is it artifacts
    # buildcache?  Or temporary-storage-url-prefix?  In some cases we need to
    # force something or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(temp_storage_url_prefix,
                                            ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror
                or (spack_is_pr_pipeline and not enable_artifacts_mirror
                    and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(env, root_spec, job_spec_pkg_name,
                                           related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To provide logs, cdash reports, etc for developer download/perusal,
    # these things have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too.  So here we
    # need to clean out the artifacts we may have got from upstream jobs.

    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [concrete_env_dir, pipeline_artifacts_dir]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory, and it will
    # also be used in the generated "spack install" command to install the spec
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url, cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(job_spec,
                                           full_hash_match=True,
                                           index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))
        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                 False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild spec

    # Start with spack arguments
    install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id, cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project, cdash_site,
            job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url',
                cdash_upload_url,
                '--cdash-build',
                cdash_build_name,
                '--cdash-site',
                cdash_site,
                '--cdash-buildstamp',
                cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)
        install_exit_code = 1  # define the exit code even if the launch failed

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            broken_spec_details = {
                'broken-spec': {
                    'job-url': get_env_var('CI_JOB_URL'),
                    'pipeline-url': get_env_var('CI_PIPELINE_URL'),
                    'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
                }
            }

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write(syaml.dump(broken_spec_details))
                web_util.push_to_url(empty_file_path,
                                     broken_spec_path,
                                     keep_original=False,
                                     extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # If we got some kind of S3 (access denied or other connection
                # error), the first non boto-specific class in the exception
                # hierarchy is Exception.  Just print a warning and return
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # We generated the "spack install ..." command to "--keep-stage", copy
    # any logs from the staging directory to artifacts now
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create buildcache on remote mirror, either on pr-specific mirror or
    # on the main mirror defined in the gitlab-enabled spack environment
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          buildcache_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 pipeline_mirror_url)

        # If this is a develop pipeline, check if the spec that we just built is
        # on the broken-specs list. If so, remove it.
        if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            just_built_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
            if web_util.url_exists(broken_spec_path):
                tty.msg('Removing {0} from the list of broken specs'.format(
                    broken_spec_path))
                try:
                    web_util.remove_url(broken_spec_path)
                except Exception as err:
                    # If we got some kind of S3 (access denied or other connection
                    # error), the first non boto-specific class in the exception
                    # hierarchy is Exception.  Just print a warning and return
                    msg = 'Error removing {0} from broken specs list: {1}'.format(
                        broken_spec_path, err)
                    tty.warn(msg)

    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # ANSI escape sequences make this print in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code
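
get_env_var, used throughout this function, is not shown. A plausible minimal stand-in (the real helper may differ) is just a guarded environment lookup:

import os

def get_env_var(variable_name):
    """Return the variable's value from the environment, or None if unset."""
    return os.environ.get(variable_name)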
Example #58
    def fetch(self):
        self.stage.chdir()

        if self.archive_file:
            tty.msg("Already downloaded %s" % self.archive_file)
            return

        possible_files = self.stage.expected_archive_files
        save_file = None
        partial_file = None
        if possible_files:
            save_file = possible_files[0]
            partial_file = possible_files[0] + '.part'

        tty.msg("Trying to fetch from %s" % self.url)

        if partial_file:
            save_args = [
                '-C',
                '-',  # continue partial downloads
                '-o',
                partial_file
            ]  # use a .part file
        else:
            save_args = ['-O']

        curl_args = save_args + [
            '-f',  # fail on >400 errors
            '-D',
            '-',  # print out HTML headers
            '-L',  # resolve 3xx redirects
            self.url,
        ]

        if sys.stdout.isatty():
            curl_args.append('-#')  # status bar when using a tty
        else:
            curl_args.append('-sS')  # just errors when not.

        # Run curl but grab the mime type from the http headers
        headers = spack.curl(*curl_args, output=str, fail_on_error=False)

        if spack.curl.returncode != 0:
            # clean up archive on failure.
            if self.archive_file:
                os.remove(self.archive_file)

            if partial_file and os.path.exists(partial_file):
                os.remove(partial_file)

            if spack.curl.returncode == 22:
                # This is a 404.  Curl will print the error.
                raise FailedDownloadError(self.url,
                                          "URL %s was not found!" % self.url)

            elif spack.curl.returncode == 60:
                # This is a certificate error.  Suggest spack -k
                raise FailedDownloadError(
                    self.url,
                    "Curl was unable to fetch due to invalid certificate. "
                    "This is either an attack, or your cluster's SSL "
                    "configuration is bad.  If you believe your SSL "
                    "configuration is bad, you can try running spack -k, "
                    "which will not check SSL certificates. "
                    "Use this at your own risk.")

            else:
                # This is some other curl error.  Curl will print the
                # error, but print a spack message too
                raise FailedDownloadError(
                    self.url,
                    "Curl failed with error %d" % spack.curl.returncode)

        # Check if we somehow got an HTML file rather than the archive we
        # asked for.  We only look at the last content type, to handle
        # redirects properly.
        content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
        if content_types and 'text/html' in content_types[-1]:
            tty.warn(
                "The contents of %s look like HTML." % (
                    self.archive_file if self.archive_file is not None
                    else "the archive"),
                "The checksum will likely be bad.  If it is, you can use",
                "'spack clean <package>' to remove the bad archive, then",
                "fix your internet gateway issue and install again.")
        if save_file:
            os.rename(partial_file, save_file)

        if not self.archive_file:
            raise FailedDownloadError(self.url)
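
The Content-Type check above deliberately looks only at the last header block, so the HTML pages served during 3xx redirects do not trigger a false warning. A runnable illustration with fabricated headers:

import re

headers = ("HTTP/1.1 302 Found\r\nContent-Type: text/html\r\n\r\n"
           "HTTP/1.1 200 OK\r\nContent-Type: application/x-gzip\r\n")
content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
assert 'text/html' not in content_types[-1]  # only the final type matters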
Example #59
def refresh(module_types, specs, args):
    """Regenerates the module files for every spec in specs and every module
    type in module types.
    """

    # Prompt a message to the user about what is going to change
    if not specs:
        tty.msg('No package matches your query')
        return

    if not args.yes_to_all:
        msg = 'You are about to regenerate {types} module files for:\n'
        types = ', '.join(module_types)
        tty.msg(msg.format(types=types))
        spack.cmd.display_specs(specs, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Module file regeneration aborted.')

    # Cycle over the module types and regenerate module files
    for module_type in module_types:

        cls = spack.modules.module_types[module_type]

        writers = [
            cls(spec) for spec in specs if spack.repo.exists(spec.name)
        ]  # skip unknown packages.

        # Filter blacklisted packages early
        writers = [x for x in writers if not x.conf.blacklisted]

        # Detect name clashes in module files
        file2writer = collections.defaultdict(list)
        for item in writers:
            file2writer[item.layout.filename].append(item)

        if len(file2writer) != len(writers):
            message = 'Name clashes detected in module files:\n'
            for filename, writer_list in file2writer.items():
                if len(writer_list) > 1:
                    message += '\nfile: {0}\n'.format(filename)
                    for x in writer_list:
                        message += 'spec: {0}\n'.format(x.spec.format())
            tty.error(message)
            tty.error('Operation aborted')
            raise SystemExit(1)

        if len(writers) == 0:
            msg = 'Nothing to be done for {0} module files.'
            tty.msg(msg.format(module_type))
            continue

        # If we arrived here we have at least one writer
        module_type_root = writers[0].layout.dirname()
        # Proceed regenerating module files
        tty.msg('Regenerating {name} module files'.format(name=module_type))
        if os.path.isdir(module_type_root) and args.delete_tree:
            shutil.rmtree(module_type_root, ignore_errors=False)
        filesystem.mkdirp(module_type_root)
        for x in writers:
            try:
                x.write(overwrite=True)
            except Exception as e:
                msg = 'Could not write module file [{0}]'
                tty.warn(msg.format(x.layout.filename))
                tty.warn('\t--> {0} <--'.format(str(e)))
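
A toy run of the name-clash detection used above: writers whose module files map to the same filename are grouped, and any group with more than one entry aborts the regeneration. Spec and filename values here are made up:

import collections

file2writer = collections.defaultdict(list)
for spec, filename in [('zlib@1.2.11', 'core/zlib'),
                       ('zlib@1.2.8', 'core/zlib'),
                       ('cmake@3.18.4', 'core/cmake')]:
    file2writer[filename].append(spec)

clashes = {f: w for f, w in file2writer.items() if len(w) > 1}
assert clashes == {'core/zlib': ['zlib@1.2.11', 'zlib@1.2.8']}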
Example #60
    def handle_fetch_error(self, error):
        tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has "
                 "been updated, and the version in your instance of Spack is "
                 "insecure. Consider updating to the latest OpenSSL version.")