Example #1
File: env.py Project: LLNL/spack
def env_deactivate(args):
    if not args.shell:
        msg = [
            "This command works best with Spack's shell support",
            ""
        ] + spack.cmd.common.shell_init_instructions + [
            'Or, if you want to use `spack env deactivate` without initializing',
            'shell support, you can run one of these:',
            '',
            '    eval `spack env deactivate --sh`   # for bash/sh',
            '    eval `spack env deactivate --csh`  # for csh/tcsh',
        ]
        tty.msg(*msg)
        return 1

    if 'SPACK_ENV' not in os.environ:
        tty.die('No environment is currently active.')

    if args.shell == 'csh':
        sys.stdout.write('unsetenv SPACK_ENV;\n')
        sys.stdout.write('if ( $?SPACK_OLD_PROMPT ) '
                         'set prompt="$SPACK_OLD_PROMPT" && '
                         'unsetenv SPACK_OLD_PROMPT;\n')
        sys.stdout.write('unalias despacktivate;\n')

    else:
        sys.stdout.write('unset SPACK_ENV; export SPACK_ENV;\n')
        sys.stdout.write('unalias despacktivate;\n')
        sys.stdout.write('if [ -n "$SPACK_OLD_PS1" ]; then\n')
        sys.stdout.write('export PS1="$SPACK_OLD_PS1";\n')
        sys.stdout.write('unset SPACK_OLD_PS1; export SPACK_OLD_PS1;\n')
        sys.stdout.write('fi;\n')
Example #2
File: env.py Project: LLNL/spack
def env_remove(args):
    """Remove a *named* environment.

    This removes an environment managed by Spack. Directory environments
    and `spack.yaml` files embedded in repositories should be removed
    manually.
    """
    read_envs = []
    for env_name in args.rm_env:
        env = ev.read(env_name)
        read_envs.append(env)

    if not args.yes_to_all:
        answer = tty.get_yes_or_no(
            'Really remove %s %s?' % (
                string.plural(len(args.rm_env), 'environment', show_n=False),
                string.comma_and(args.rm_env)),
            default=False)
        if not answer:
            tty.die("Will not remove any environments")

    for env in read_envs:
        if env.active:
            tty.die("Environment %s can't be removed while activated.")

        env.destroy()
        tty.msg("Successfully removed environment '%s'" % env.name)
Example #3
def bootstrap(parser, args):
    origin_url = get_origin_url()
    prefix = args.prefix

    tty.msg("Fetching spack from origin: %s" % origin_url)

    if os.path.exists(join_path(prefix, '.git')):
        tty.die("There already seems to be a git repository in %s" % prefix)

    files_in_the_way = os.listdir(prefix)
    if files_in_the_way:
        tty.die("There are already files there!  Delete these files before boostrapping spack.",
                *files_in_the_way)

    tty.msg("Installing:",
            "%s/bin/spack" % prefix,
            "%s/lib/spack/..." % prefix)

    os.chdir(prefix)
    check_call(['git', 'init', '--shared', '-q'])
    check_call(['git', 'remote', 'add', 'origin', origin_url])
    check_call(['git', 'fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q'])
    check_call(['git', 'reset', '--hard', 'origin/master', '-q'])

    tty.msg("Successfully created a new spack in %s" % prefix,
            "Run %s/bin/spack to use this installation." % prefix)
Example #4
def rm(mtype, specs, args):
    """Deletes module files associated with items in specs"""
    module_cls = module_types[mtype]
    specs_with_modules = [
        spec for spec in specs if os.path.exists(module_cls(spec).file_name)]
    modules = [module_cls(spec) for spec in specs_with_modules]

    if not modules:
        tty.msg('No module file matches your query')
        raise SystemExit(1)

    # Ask for confirmation
    if not args.yes_to_all:
        tty.msg(
            'You are about to remove {0} module files for the following specs:\n'
            .format(mtype))
        spack.cmd.display_specs(specs_with_modules, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Will not remove any module files')

    # Remove the module files
    for s in modules:
        s.remove()
Example #5
def module(parser, args):
    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        'refresh': {
            'installed': True,
            'known': True
        },
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})
    specs = args.specs(**query_args)
    module_type = args.module_type
    constraint = args.constraint
    try:
        callbacks[args.subparser_name](module_type, specs, args)
    except MultipleMatches:
        message = ("the constraint '{query}' matches multiple packages, "
                   "and this is not allowed in this context")
        tty.error(message.format(query=constraint))
        for s in specs:
            sys.stderr.write(s.format(color=True) + '\n')
        raise SystemExit(1)
    except NoMatch:
        message = ("the constraint '{query}' matches no package, "
                   "and this is not allowed in this context")
        tty.die(message.format(query=constraint))
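A note on the dispatch pattern above: callbacks is a plain name-to-function table keyed by subcommand name. A toy sketch with a hypothetical handler (not the real Spack callbacks):

def refresh(module_type, specs, args):
    # Stand-in handler; the real one regenerates module files.
    print('refreshing', module_type)

callbacks = {'refresh': refresh}
callbacks['refresh']('tcl', [], None)  # dispatch by subparser name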
Example #6
def create(parser, args):
    # Gather information about the package to be created
    name = get_name(args)
    url = get_url(args)
    versions, guesser = get_versions(args, name)
    build_system = get_build_system(args, guesser)

    # Create the package template object
    PackageClass = templates[build_system]
    package = PackageClass(name, url, versions)
    tty.msg("Created template for {0} package".format(package.name))

    # Create a directory for the new package
    repo = get_repository(args, name)
    pkg_path = repo.filename_for_package_name(package.name)
    if os.path.exists(pkg_path) and not args.force:
        tty.die('{0} already exists.'.format(pkg_path),
                '  Try running `spack create --force` to overwrite it.')
    else:
        mkdirp(os.path.dirname(pkg_path))

    # Write the new package file
    package.write(pkg_path)
    tty.msg("Created package file: {0}".format(pkg_path))

    # Open up the new package file in your $EDITOR
    spack.editor(pkg_path)
Example #7
File: info.py Project: LLNL/cram
def info(parser, args):
    if not args.cramfile:
        tty.error("You must specify a file to display with cram info.")

    with closing(CramFile(args.cramfile, 'r')) as cf:
        if args.all:
            write_header(args, cf)
            print()
            print("Job information:")
            for i, job in enumerate(cf):
                print("Job %d:" % i)
                write_job_info(job)

        elif args.job is not None:
            if args.job < 0 or args.job >= len(cf):
                tty.die("No job %d in this cram file." % args.job)
            print "Job %d:" % args.job
            for i, job in enumerate(cf):
                if i == args.job:
                    write_job_info(job)
                    break

        else:
            write_header(args, cf)
            print()
            write_job_summary(args, cf)
Example #8
    def cxx14_flag(self):
        if self.version < ver('4.8'):
            tty.die("Only gcc 4.8 and above support c++14.")
        elif self.version < ver('4.9'):
            return "-std=c++1y"
        else:
            return "-std=c++14"
Example #9
def env(parser, args):
    if not args.spec:
        tty.die("spack env requires a spec.")

    # Specs may have spaces in them, so if they do, require that the
    # caller put a '--' between the spec and the command to be
    # executed.  If there is no '--', assume that the spec is the
    # first argument.
    sep = '--'
    if sep in args.spec:
        s = args.spec.index(sep)
        spec = args.spec[:s]
        cmd  = args.spec[s+1:]
    else:
        spec = args.spec[0]
        cmd  = args.spec[1:]

    specs = spack.cmd.parse_specs(spec, concretize=True)
    if len(specs) > 1:
        tty.die("spack env only takes one spec.")
    spec = specs[0]

    build_env.setup_package(spec.package)

    if not cmd:
        # If no command act like the "env" command and print out env vars.
        for key, val in os.environ.items():
            print "%s=%s" % (key, val)

    else:
        # Otherwise execute the command with the new environment
        os.execvp(cmd[0], cmd)
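The '--' handling above is easy to check in isolation; a minimal sketch with a made-up argument list:

# Hypothetical args.spec value: spec tokens, then '--', then the command.
args_spec = ['gcc@4.9.3', '--', 'make', '-j8']
s = args_spec.index('--')
spec, cmd = args_spec[:s], args_spec[s + 1:]
print(spec, cmd)  # ['gcc@4.9.3'] ['make', '-j8']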
Example #10
def spec(parser, args):
    name_fmt = '$.' if args.namespaces else '$_'
    kwargs = {'cover': args.cover,
              'format': name_fmt + '$@$%@+$+$=',
              'hashlen': None if args.very_long else 7,
              'show_types': args.types,
              'install_status': args.install_status}

    if not args.specs:
        tty.die("spack spec requires at least one spec")

    for spec in spack.cmd.parse_specs(args.specs):
        # With -y, just print YAML to output.
        if args.yaml:
            if spec.name in spack.repo.path or spec.virtual:
                spec.concretize()

            # use write because to_yaml already has a newline.
            sys.stdout.write(spec.to_yaml())
            continue

        kwargs['hashes'] = False  # Always False for input spec
        print("Input spec")
        print("--------------------------------")
        print(spec.tree(**kwargs))

        kwargs['hashes'] = args.long or args.very_long
        print("Concretized")
        print("--------------------------------")
        spec.concretize()
        print(spec.tree(**kwargs))
Example #11
def which(*args, **kwargs):
    """Finds an executable in the path like command-line which.

    If given multiple executables, returns the first one that is found.
    If no executables are found, returns None.

    Parameters:
        *args (str): One or more executables to search for

    Keyword Arguments:
        path (:func:`list` or str): The path to search. Defaults to ``PATH``
        required (bool): If set to True, raise an error if executable not found

    Returns:
        Executable: The first executable that is found in the path
    """
    path = kwargs.get('path', os.environ.get('PATH', ''))
    required = kwargs.get('required', False)

    if isinstance(path, string_types):
        path = path.split(os.pathsep)

    for name in args:
        for directory in path:
            exe = os.path.join(directory, name)
            if os.path.isfile(exe) and os.access(exe, os.X_OK):
                return Executable(exe)

    if required:
        tty.die("spack requires '%s'. Make sure it is in your path." % args[0])

    return None
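Typical usage of which(), as a sketch (assuming a compiler is somewhere on PATH):

# Returns an Executable wrapper for the first hit, or None.
cc = which('gcc', 'cc')
if cc is None:
    print('no C compiler found')

# With required=True, a miss aborts via tty.die instead of returning None.
make = which('make', required=True)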
Example #12
File: graph.py Project: LLNL/spack
def graph(parser, args):
    concretize = not args.normalize
    if args.installed:
        if args.specs:
            tty.die("Can't specify specs with --installed")
        args.dot = True
        specs = spack.store.db.query()

    else:
        specs = spack.cmd.parse_specs(
            args.specs, normalize=True, concretize=concretize)

    if not specs:
        setup_parser.parser.print_help()
        return 1

    deptype = all_deptypes
    if args.deptype:
        deptype = tuple(args.deptype.split(','))
        if deptype == ('all',):
            deptype = 'all'
        deptype = canonical_deptype(deptype)

    if args.dot:  # Dot graph only if asked for.
        graph_dot(specs, static=args.static, deptype=deptype)

    elif specs:  # ascii is default: user doesn't need to provide it explicitly
        debug = spack.config.get('config:debug')
        graph_ascii(specs[0], debug=debug, deptype=deptype)
        for spec in specs[1:]:
            print()  # extra line between independent graphs
            graph_ascii(spec, debug=debug)
Example #13
    def cxx11_flag(self):
        if self.version < ver('11.1'):
            tty.die("Only intel 11.1 and above support c++11.")
        elif self.version < ver('13'):
            return "-std=c++0x"
        else:
            return "-std=c++11"
Example #14
File: mirror.py Project: trws/spack
def mirror_remove(args):
    """Remove a mirror by name."""
    name = args.name

    rmd_something = spack.config.remove_from_config('mirrors', name)
    if not rmd_something:
        tty.die("No such mirror: %s" % name)
Example #15
    def cxx11_flag(self):
        if self.version < ver('4.3'):
            tty.die("Only gcc 4.3 and above support c++11.")
        elif self.version < ver('4.7'):
            return "-std=c++0x"
        else:
            return "-std=c++11"
Example #16
File: fetch.py Project: LLNL/spack
def fetch(parser, args):
    if not args.packages:
        tty.die("fetch requires at least one package argument")

    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')

    specs = spack.cmd.parse_specs(args.packages, concretize=True)
    for spec in specs:
        if args.missing or args.dependencies:
            for s in spec.traverse():
                package = spack.repo.get(s)

                # Skip already-installed packages with --missing
                if args.missing and package.installed:
                    continue

                # Do not attempt to fetch externals (they're local)
                if package.spec.external:
                    continue

                package.do_fetch()

        package = spack.repo.get(spec)
        package.do_fetch()
Example #17
def _stop_at_phase_during_install(args, calling_fn, phase_mapping):
    if not args.package:
        tty.die("configure requires at least one package argument")

    # TODO: to be refactored with code in install
    specs = spack.cmd.parse_specs(args.package, concretize=True)
    if len(specs) != 1:
        tty.error('only one spec can be installed at a time.')
    spec = specs.pop()
    pkg = spec.package
    try:
        key = [cls for cls in phase_mapping if isinstance(pkg, cls)].pop()
        phase = phase_mapping[key]
        # Install package dependencies if needed
        parser = argparse.ArgumentParser()
        inst.setup_parser(parser)
        tty.msg('Checking dependencies for {0}'.format(args.package))
        cli_args = ['-v'] if args.verbose else []
        install_args = parser.parse_args(cli_args + ['--only=dependencies'])
        install_args.package = args.package
        inst.install(parser, install_args)
        # Install package and stop at the given phase
        cli_args = ['-v'] if args.verbose else []
        install_args = parser.parse_args(cli_args + ['--only=package'])
        install_args.package = args.package
        inst.install(parser, install_args, stop_at=phase)
    except IndexError:
        tty.error(
            'Package {0} has no {1} phase, or its {1} phase is not separated from install'.format(  # NOQA: ignore=E501
                spec.name, calling_fn.__name__)
        )
Example #18
def modules_cmd(parser, args, module_type, callbacks=callbacks):

    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        'refresh': {
            'installed': True,
            'known': True
        },
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})

    # Get the specs that match the query from the DB
    specs = args.specs(**query_args)

    try:

        callbacks[args.subparser_name](module_type, specs, args)

    except MultipleSpecsMatch:
        msg = "the constraint '{query}' matches multiple packages:\n"
        for s in specs:
            msg += '\t' + s.cformat(format_string='$/ $_$@$+$%@+$+$=') + '\n'
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501

    except NoSpecMatches:
        msg = "the constraint '{query}' matches no package."
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501
Example #19
    def _get_pkg_module(self, pkg_name):
        """Create a module for a particular package.

        This caches the module within this Repo *instance*.  It does
        *not* add it to ``sys.modules``.  So, you can construct
        multiple Repos for testing and ensure that the module will be
        loaded once per repo.

        """
        if pkg_name not in self._modules:
            file_path = self.filename_for_package_name(pkg_name)

            if not os.path.exists(file_path):
                raise UnknownPackageError(pkg_name, self)

            if not os.path.isfile(file_path):
                tty.die("Something's wrong. '%s' is not a file!" % file_path)

            if not os.access(file_path, os.R_OK):
                tty.die("Cannot read '%s'!" % file_path)

            # e.g., spack.pkg.builtin.mpich
            fullname = "%s.%s" % (self.full_namespace, pkg_name)

            module = imp.load_source(fullname, file_path)
            module.__package__ = self.full_namespace
            module.__loader__ = self
            self._modules[pkg_name] = module

        return self._modules[pkg_name]
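imp.load_source is deprecated on modern Python; an importlib-based equivalent of the load step could look like this sketch (the fullname and file_path values are hypothetical stand-ins for the ones computed above):

import importlib.util

# Hypothetical inputs mirroring the method above.
fullname = 'spack.pkg.builtin.mpich'
file_path = '/path/to/repo/packages/mpich/package.py'

# Build a module spec from the package file and execute it; like
# imp.load_source, this does not touch sys.modules.
module_spec = importlib.util.spec_from_file_location(fullname, file_path)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)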
Example #20
def make_binary_placeholder(cur_path_names, allow_root):
    """
    Replace old install root in RPATHs with placeholder in binary files
    """
    if platform.system() == 'Darwin':
        for cur_path in cur_path_names:
            rpaths, deps, idpath = macho_get_paths(cur_path)
            (new_rpaths,
             new_deps,
             new_idpath) = macho_make_paths_placeholder(rpaths, deps, idpath)
            modify_macho_object(cur_path,
                                rpaths, deps, idpath,
                                new_rpaths, new_deps, new_idpath)
            if (not allow_root and
                strings_contains_installroot(cur_path,
                                             spack.store.layout.root)):
                raise InstallRootStringException(
                    cur_path, spack.store.layout.root)
    elif platform.system() == 'Linux':
        for cur_path in cur_path_names:
            orig_rpaths = get_existing_elf_rpaths(cur_path)
            if orig_rpaths:
                new_rpaths = get_placeholder_rpaths(cur_path, orig_rpaths)
                modify_elf_object(cur_path, new_rpaths)
                if (not allow_root and
                    strings_contains_installroot(
                        cur_path, spack.store.layout.root)):
                    raise InstallRootStringException(
                        cur_path, spack.store.layout.root)
    else:
        tty.die("Placeholder not implemented for %s" % platform.system())
Example #21
def make_binary_relative(cur_path_names, orig_path_names, old_dir, allow_root):
    """
    Replace old RPATHs with paths relative to old_dir in binary files
    """
    if platform.system() == 'Darwin':
        for cur_path, orig_path in zip(cur_path_names, orig_path_names):
            rpaths, deps, idpath = macho_get_paths(cur_path)
            (new_rpaths,
             new_deps,
             new_idpath) = macho_make_paths_relative(orig_path, old_dir,
                                                     rpaths, deps, idpath)
            modify_macho_object(cur_path,
                                rpaths, deps, idpath,
                                new_rpaths, new_deps, new_idpath)
            if (not allow_root and
                    strings_contains_installroot(cur_path)):
                raise InstallRootStringException(cur_path)
    elif platform.system() == 'Linux':
        for cur_path, orig_path in zip(cur_path_names, orig_path_names):
            orig_rpaths = get_existing_elf_rpaths(cur_path)
            if orig_rpaths:
                new_rpaths = get_relative_rpaths(orig_path, old_dir,
                                                 orig_rpaths)
                modify_elf_object(cur_path, new_rpaths)
                if (not allow_root and
                        strings_contains_installroot(cur_path, old_dir)):
                    raise InstallRootStringException(cur_path, old_dir)
    else:
        tty.die("Prelocation not implemented for %s" % platform.system())
Example #22
File: cnl.py Project: LLNL/spack
    def find_compiler(self, cmp_cls, *paths):
        # function-local so that cnl doesn't depend on spack.config
        import spack.spec

        compilers = []
        if cmp_cls.PrgEnv:
            if not cmp_cls.PrgEnv_compiler:
                tty.die('Must supply PrgEnv_compiler with PrgEnv')

            modulecmd = get_module_cmd()

            output = modulecmd(
                'avail', cmp_cls.PrgEnv_compiler, output=str, error=str)
            version_regex = r'(%s)/([\d\.]+[\d])' % cmp_cls.PrgEnv_compiler
            matches = re.findall(version_regex, output)
            for name, version in matches:
                v = version
                comp = cmp_cls(
                    spack.spec.CompilerSpec(name + '@' + v),
                    self, "any",
                    ['cc', 'CC', 'ftn'], [cmp_cls.PrgEnv, name + '/' + v])

                compilers.append(comp)

        return compilers
Example #23
def rm(module_type, specs, args):
    """Deletes the module files associated with every spec in specs, for every
    module type in module types.
    """

    module_cls = spack.modules.module_types[module_type]
    module_exist = lambda x: os.path.exists(module_cls(x).layout.filename)

    specs_with_modules = [spec for spec in specs if module_exist(spec)]

    modules = [module_cls(spec) for spec in specs_with_modules]

    if not modules:
        tty.die('No module file matches your query')

    # Ask for confirmation
    if not args.yes_to_all:
        msg = 'You are about to remove {0} module files for:\n'
        tty.msg(msg.format(module_type))
        spack.cmd.display_specs(specs_with_modules, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Will not remove any module files')

    # Remove the module files
    for s in modules:
        s.remove()
Example #24
def repo_remove(args):
    """Remove a repository from Spack's configuration."""
    repos = spack.config.get_config('repos', args.scope)
    path_or_namespace = args.path_or_namespace

    # If the argument is a path, remove that repository from config.
    canon_path = canonicalize_path(path_or_namespace)
    for repo_path in repos:
        repo_canon_path = canonicalize_path(repo_path)
        if canon_path == repo_canon_path:
            repos.remove(repo_path)
            spack.config.update_config('repos', repos, args.scope)
            tty.msg("Removed repository %s" % repo_path)
            return

    # If it is a namespace, remove corresponding repo
    for path in repos:
        try:
            repo = Repo(path)
            if repo.namespace == path_or_namespace:
                repos.remove(path)
                spack.config.update_config('repos', repos, args.scope)
                tty.msg("Removed repository %s with namespace '%s'."
                        % (repo.root, repo.namespace))
                return
        except RepoError:
            continue

    tty.die("No repository with path or namespace: %s"
            % path_or_namespace)
Example #25
def get_uninstall_list(args):
    specs = [any]
    if args.packages:
        specs = spack.cmd.parse_specs(args.packages)

    # Get the list of installed specs that match the ones given via cli;
    # takes care of the case where '-a' is given in the cli
    uninstall_list = find_matching_specs(specs, args.all, args.force)

    # Takes care of '-d'
    dependent_list = installed_dependents(uninstall_list)

    # Process dependent_list and update uninstall_list
    has_error = False
    if dependent_list and not args.dependents and not args.force:
        for spec, lst in dependent_list.items():
            tty.error("Will not uninstall %s" % spec.cformat("$_$@$%@$/"))
            print('')
            print('The following packages depend on it:')
            spack.cmd.display_specs(lst, **display_args)
            print('')
            has_error = True
    elif args.dependents:
        for key, lst in dependent_list.items():
            uninstall_list.extend(lst)
        uninstall_list = list(set(uninstall_list))
    if has_error:
        tty.die('Use `spack uninstall --dependents` '
                'to uninstall these dependencies as well.')

    return uninstall_list
Example #26
def dependents(parser, args):
    specs = spack.cmd.parse_specs(args.spec)
    if len(specs) != 1:
        tty.die("spack dependents takes only one spec.")

    if args.installed:
        spec = spack.cmd.disambiguate_spec(specs[0])

        tty.msg("Dependents of %s" % spec.cformat('$_$@$%@$/'))
        deps = spack.store.db.installed_relatives(
            spec, 'parents', args.transitive)
        if deps:
            spack.cmd.display_specs(deps, long=True)
        else:
            print("No dependents")

    else:
        spec = specs[0]
        ideps = inverted_dependencies()

        dependents = get_dependents(spec.name, ideps, args.transitive)
        dependents.remove(spec.name)
        if dependents:
            colify(sorted(dependents))
        else:
            print("No dependents")
Example #27
def _profile_wrapper(command, parser, args, unknown_args):
    import cProfile

    try:
        nlines = int(args.lines)
    except ValueError:
        if args.lines != 'all':
            tty.die('Invalid number for --lines: %s' % args.lines)
        nlines = -1

    # allow comma-separated list of fields
    sortby = ['time']
    if args.sorted_profile:
        sortby = args.sorted_profile.split(',')
        for stat in sortby:
            if stat not in stat_names:
                tty.die("Invalid sort field: %s" % stat)

    try:
        # make a profiler and run the code.
        pr = cProfile.Profile()
        pr.enable()
        return _invoke_command(command, parser, args, unknown_args)

    finally:
        pr.disable()

        # print out profile stats.
        stats = pstats.Stats(pr)
        stats.sort_stats(*sortby)
        stats.print_stats(nlines)
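The enable/disable/report pattern above works the same outside Spack; a minimal standalone sketch using only the standard library:

import cProfile
import pstats

pr = cProfile.Profile()
pr.enable()
sum(i * i for i in range(100000))  # stand-in for the command being profiled
pr.disable()

stats = pstats.Stats(pr)
stats.sort_stats('time')  # any pstats sort field, like args.sorted_profile
stats.print_stats(10)     # top 10 entries, like --lines 10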
Example #28
    def __init__(self, filename, mode='r'):
        """The CramFile constructor functions much like open().

           The constructor takes a filename and an I/O mode, which can
           be 'r', 'w', or 'a', for read, write, or append.

           Opening a CramFile for writing will create a file with a
           simple header containing no jobs.
        """
        # Save the first job from the file.
        self.first_job = None

        self.mode = mode
        if mode not in ('r', 'w', 'a'):
            raise ValueError("Mode must be 'r', 'w', or 'a'.")

        if mode == 'r':
            if not os.path.exists(filename) or os.path.isdir(filename):
                tty.die("No such file: %s" % filename)

            self.stream = open(filename, 'rb')
            self._read_header()

        elif mode == 'w' or (mode == 'a' and not os.path.exists(filename)):
            self.stream = open(filename, 'wb')
            self.version = _version
            self.num_jobs = 0
            self.num_procs = 0
            self.max_job_size = 0
            self._write_header()

        elif mode == 'a':
            self.stream = open(filename, 'rb+')
            self._read_header()
            self.stream.seek(0, os.SEEK_END)
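Typical read usage mirrors Example #7 above; a sketch, assuming 'jobs.cram' exists and that _read_header fills in the header fields that _write_header records:

from contextlib import closing

# closing() guarantees the underlying stream is released even on error.
with closing(CramFile('jobs.cram', 'r')) as cf:
    print(cf.num_jobs, cf.num_procs)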
Example #29
    def shell_set(var, value):
        if shell == 'sh':
            print("%s='%s'" % (var, value))
        elif shell == 'csh':
            print("set %s = '%s'" % (var, value))
        else:
            tty.die('shell must be sh or csh')
Example #30
File: create.py Project: d-tk/spack
def fetch_tarballs(url, name, version):
    """Try to find versions of the supplied archive by scraping the web.

    Prompts the user to select how many to download if many are found.
    """
    versions = spack.util.web.find_versions_of_archive(url)
    rkeys = sorted(versions.keys(), reverse=True)
    versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))

    archives_to_fetch = 1
    if not versions:
        # If the fetch failed for some reason, revert to what the user provided
        versions = {version: url}
    elif len(versions) > 1:
        tty.msg("Found %s versions of %s:" % (len(versions), name),
                *spack.cmd.elide_list(
                    ["%-10s%s" % (v,u) for v, u in versions.iteritems()]))
        print
        archives_to_fetch = tty.get_number(
            "Include how many checksums in the package file?",
            default=5, abort='q')

        if not archives_to_fetch:
            tty.die("Aborted.")

    sorted_versions = sorted(versions.keys(), reverse=True)
    sorted_urls = [versions[v] for v in sorted_versions]
    return sorted_versions[:archives_to_fetch], sorted_urls[:archives_to_fetch]
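The OrderedDict rebuild near the top simply re-keys the mapping in descending version order; a toy illustration with plain string keys (the real keys are version objects):

from collections import OrderedDict

versions = {'1.0': 'url1', '2.0': 'url2', '1.5': 'url3'}
rkeys = sorted(versions.keys(), reverse=True)
ordered = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))
print(list(ordered))  # ['2.0', '1.5', '1.0']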
Example #31
def get_name(args):
    """Get the name of the package based on the supplied arguments.

    If a name was provided, always use that. Otherwise, if a URL was
    provided, extract the name from that. Otherwise, use a default.

    Args:
        args (argparse.Namespace): The arguments given to
            ``spack create``

    Returns:
        str: The name of the package
    """

    # Default package name
    name = 'example'

    if args.name is not None:
        # Use a user-supplied name if one is present
        name = args.name
        if len(args.name.strip()) > 0:
            tty.msg("Using specified package name: '{0}'".format(name))
        else:
            tty.die("A package name must be provided when using the option.")
    elif args.url is not None:
        # Try to guess the package name based on the URL
        try:
            name = parse_name(args.url)
            if name != args.url:
                desc = 'URL'
            else:
                desc = 'package name'
            tty.msg("This looks like a {0} for {1}".format(desc, name))
        except UndetectableNameError:
            tty.die("Couldn't guess a name for this package.",
                    "  Please report this bug. In the meantime, try running:",
                    "  `spack create --name <name> <url>`")

    name = simplify_name(name)

    if not valid_fully_qualified_module_name(name):
        tty.die("Package name can only contain a-z, 0-9, and '-'")

    return name
Example #32
def diy(self, args):
    if not args.spec:
        tty.die("spack diy requires a package spec argument.")

    specs = spack.cmd.parse_specs(args.spec)
    if len(specs) > 1:
        tty.die("spack diy only takes one spec.")

    spec = specs[0]
    if not spack.repo.exists(spec.name):
        tty.warn("No such package: %s" % spec.name)
        create = tty.get_yes_or_no("Create this package?", default=False)
        if not create:
            tty.msg("Exiting without creating.")
            sys.exit(1)
        else:
            tty.msg("Running 'spack edit -f %s'" % spec.name)
            edit_package(spec.name, spack.repo.first_repo(), None, True)
            return

    if not spec.versions.concrete:
        tty.die("spack diy spec must have a single, concrete version. "
                "Did you forget a package version number?")

    spec.concretize()
    package = spack.repo.get(spec)

    if package.installed:
        tty.error("Already installed in %s" % package.prefix)
        tty.msg("Uninstall or try adding a version suffix for this DIY build.")
        sys.exit(1)

    # Forces the build to run out of the current directory.
    package.stage = DIYStage(os.getcwd())

    # TODO: make this an argument, not a global.
    spack.do_checksum = False

    package.do_install(
        keep_prefix=args.keep_prefix,
        install_deps=not args.ignore_deps,
        verbose=not args.quiet,
        keep_stage=True,  # don't remove source dir for DIY.
        dirty=args.dirty)
Example #33
def checksum(parser, args):
    # get the package we're going to generate checksums for
    pkg = spack.repo.get(args.package)

    # If the user asked for specific versions, use those.
    if args.versions:
        versions = {}
        for version in args.versions:
            version = ver(version)
            if not isinstance(version, Version):
                tty.die("Cannot generate checksums for version lists or " +
                        "version ranges.  Use unambiguous versions.")
            versions[version] = pkg.url_for_version(version)
    else:
        versions = pkg.fetch_remote_versions()
        if not versions:
            tty.die("Could not fetch any versions for %s" % pkg.name)

    sorted_versions = sorted(versions, reverse=True)

    tty.msg("Found %s versions of %s" % (len(versions), pkg.name),
            *spack.cmd.elide_list(
                ["%-10s%s" % (v, versions[v]) for v in sorted_versions]))
    print()
    archives_to_fetch = tty.get_number(
        "How many would you like to checksum?", default=5, abort='q')

    if not archives_to_fetch:
        tty.msg("Aborted.")
        return

    version_hashes = get_checksums(
        sorted_versions[:archives_to_fetch],
        [versions[v] for v in sorted_versions[:archives_to_fetch]],
        keep_stage=args.keep_stage)

    if not version_hashes:
        tty.die("Could not fetch any versions for %s" % pkg.name)

    version_lines = [
        "  version('%s', '%s')" % (v, h) for v, h in version_hashes
    ]
    tty.msg("Checksummed new versions of %s:" % pkg.name, *version_lines)
Example #34
def activate(parser, args):
    # TODO: shouldn't have to concretize here.  Fix DAG issues.
    specs = spack.cmd.parse_specs(args.spec, concretize=True)
    if len(specs) != 1:
        tty.die("activate requires one spec.  %d given." % len(specs))

    # TODO: remove this hack when DAG info is stored in dir layout.
    # This ensures the ext spec is always normalized properly.
    spack.db.get(specs[0])

    spec = spack.cmd.disambiguate_spec(specs[0])

    if not spec.package.is_extension:
        tty.die("%s is not an extension." % spec.name)

    if spec.package.activated:
        tty.die("Package %s is already activated." % specs[0].short_spec)

    spec.package.do_activate()
Example #35
def location(parser, args):
    if args.module_dir:
        print(spack.module_path)

    elif args.spack_root:
        print(spack.prefix)

    elif args.packages:
        print(spack.db.root)

    else:
        specs = spack.cmd.parse_specs(args.spec)
        if not specs:
            tty.die("You must supply a spec.")
        if len(specs) != 1:
            tty.die("Too many specs.  Supply only one.")

        if args.install_dir:
            # install_dir command matches against installed specs.
            spec = spack.cmd.disambiguate_spec(specs[0])
            print(spec.prefix)

        else:
            spec = specs[0]

            if args.package_dir:
                # This one just needs the spec name.
                print(join_path(spack.db.root, spec.name))

            else:
                # These versions need concretized specs.
                spec.concretize()
                pkg = spack.db.get(spec)

                if args.stage_dir:
                    print(pkg.stage.path)

                else:  # args.build_dir is the default.
                    if not pkg.stage.source_path:
                        tty.die(
                            "Build directory does not exist yet. Run this to create it:",
                            "spack stage " + " ".join(args.spec))
                    print(pkg.stage.source_path)
Example #36
def get_repository(args, name):
    """Returns a Repo object that will allow us to determine the path where
    the new package file should be created.

    Args:
        args (argparse.Namespace): The arguments given to ``spack create``
        name (str): The name of the package to create

    Returns:
        spack.repo.Repo: A Repo object capable of determining the path to the
            package file
    """
    spec = Spec(name)
    # Figure out namespace for spec
    if spec.namespace and args.namespace and spec.namespace != args.namespace:
        tty.die("Namespaces '{0}' and '{1}' do not match.".format(
            spec.namespace, args.namespace))

    if not spec.namespace and args.namespace:
        spec.namespace = args.namespace

    # Figure out where the new package should live
    repo_path = args.repo
    if repo_path is not None:
        repo = spack.repo.Repo(repo_path)
        if spec.namespace and spec.namespace != repo.namespace:
            tty.die("Can't create package with namespace {0} in repo with "
                    "namespace {1}".format(spec.namespace, repo.namespace))
    else:
        if spec.namespace:
            repo = spack.repo.path.get_repo(spec.namespace, None)
            if not repo:
                tty.die("Unknown namespace: '{0}'".format(spec.namespace))
        else:
            repo = spack.repo.path.first_repo()

    # Set the namespace on the spec if it's not there already
    if not spec.namespace:
        spec.namespace = repo.namespace

    return repo
Example #37
def guess_name_and_version(url, args):
    # Try to deduce name and version of the new package from the URL
    version = spack.url.parse_version(url)
    if not version:
        tty.die("Couldn't guess a version string from %s" % url)

    # Try to guess a name.  If it doesn't work, allow the user to override.
    if args.alternate_name:
        name = args.alternate_name
    else:
        try:
            name = spack.url.parse_name(url, version)
        except spack.url.UndetectableNameError:
            # Use a user-supplied name if one is present
            tty.die("Couldn't guess a name for this package. Try running:", "",
                    "spack create --name <name> <url>")

    if not valid_fully_qualified_module_name(name):
        tty.die("Package name can only contain A-Z, a-z, 0-9, '_' and '-'")

    return name, version
Example #38
def module(parser, args):

    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        'refresh': {
            'installed': True,
            'known': True
        },
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})

    # Get the specs that match the query from the DB
    specs = args.specs(**query_args)

    # Set the module types that have been selected
    module_types = args.module_type
    if module_types is None:
        # If no selection has been made select all of them
        module_types = list(spack.modules.module_types.keys())

    try:

        callbacks[args.subparser_name](module_types, specs, args)

    except MultipleSpecsMatch:
        msg = "the constraint '{query}' matches multiple packages:\n"
        for s in specs:
            msg += '\t' + s.cformat() + '\n'
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501

    except NoSpecMatches:
        msg = "the constraint '{query}' matches no package."
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501

    except MultipleModuleTypes:
        msg = "this command needs exactly **one** module type active."
        tty.die(msg)
Example #39
def checksum(parser, args):
    # Did the user pass 'package@version' string?
    if len(args.versions) == 0 and '@' in args.package:
        args.versions = [args.package.split('@')[1]]
        args.package = args.package.split('@')[0]

    # Make sure the user provided a package and not a URL
    if not valid_fully_qualified_module_name(args.package):
        tty.die("`spack checksum` accepts package names, not URLs.")

    # Get the package we're going to generate checksums for
    pkg = spack.repo.get(args.package)

    if args.versions:
        # If the user asked for specific versions, use those
        url_dict = {}
        for version in args.versions:
            version = ver(version)
            if not isinstance(version, Version):
                tty.die("Cannot generate checksums for version lists or "
                        "version ranges. Use unambiguous versions.")
            url_dict[version] = pkg.url_for_version(version)
    else:
        # Otherwise, see what versions we can find online
        url_dict = pkg.fetch_remote_versions()
        if not url_dict:
            tty.die("Could not find any versions for {0}".format(pkg.name))

    version_lines = spack.stage.get_checksums_for_versions(
        url_dict,
        pkg.name,
        keep_stage=args.keep_stage,
        batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
        fetch_options=pkg.fetch_options)

    print()
    print(version_lines)
    print()
Example #40
def install_tarball(spec, args):
    s = spack.spec.Spec(spec)
    if s.external or s.virtual:
        tty.warn("Skipping external or virtual package %s" % spec.format())
        return
    yes_to_all = False
    if args.yes_to_all:
        yes_to_all = True
    force = False
    if args.force:
        force = True
    for d in s.dependencies(deptype=('link', 'run')):
        tty.msg("Installing buildcache for dependency spec %s" % d)
        install_tarball(d, args)
    package = spack.repo.get(spec)
    if s.concrete and package.installed and not force:
        tty.warn("Package for spec %s already installed." % spec.format(),
                 " Use -f flag to overwrite.")
    else:
        tarball = bindist.download_tarball(spec)
        if tarball:
            tty.msg('Installing buildcache for spec %s' % spec.format())
            try:
                bindist.extract_tarball(spec, tarball, yes_to_all, force)
            except NoOverwriteException as e:
                tty.warn("%s exists. use -f to force overwrite." % e.args)
            except NoVerifyException:
                tty.die("Package spec file failed signature verification,"
                        " use -y flag to install build cache")
            except NoChecksumException:
                tty.die("Package tarball failed checksum verification,"
                        " use -y flag to install build cache")
            finally:
                spack.store.db.reindex(spack.store.layout)
        else:
            tty.die('Download of binary cache file for spec %s failed.' %
                    spec.format())
Example #41
def refresh(module_types, specs, args):
    """Regenerates the module files for every spec in specs and every module
    type in module types.
    """

    # Print a message to the user about what is going to change
    if not specs:
        tty.msg('No package matches your query')
        return

    if not args.yes_to_all:
        msg = 'You are about to regenerate {types} module files for:\n'
        types = ', '.join(module_types)
        tty.msg(msg.format(types=types))
        spack.cmd.display_specs(specs, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Module file regeneration aborted.')

    # Cycle over the module types and regenerate module files
    for module_type in module_types:

        cls = spack.modules.module_types[module_type]

        writers = [
            cls(spec) for spec in specs if spack.repo.exists(spec.name)
        ]  # skip unknown packages.

        # Filter blacklisted packages early
        writers = [x for x in writers if not x.conf.blacklisted]

        # Detect name clashes in module files
        file2writer = collections.defaultdict(list)
        for item in writers:
            file2writer[item.layout.filename].append(item)

        if len(file2writer) != len(writers):
            message = 'Name clashes detected in module files:\n'
            for filename, writer_list in file2writer.items():
                if len(writer_list) > 1:
                    message += '\nfile: {0}\n'.format(filename)
                    for x in writer_list:
                        message += 'spec: {0}\n'.format(x.spec.format())
            tty.error(message)
            tty.error('Operation aborted')
            raise SystemExit(1)

        if len(writers) == 0:
            msg = 'Nothing to be done for {0} module files.'
            tty.msg(msg.format(module_type))
            continue

        # If we arrived here we have at least one writer
        module_type_root = writers[0].layout.dirname()
        # Proceed regenerating module files
        tty.msg('Regenerating {name} module files'.format(name=module_type))
        if os.path.isdir(module_type_root) and args.delete_tree:
            shutil.rmtree(module_type_root, ignore_errors=False)
        filesystem.mkdirp(module_type_root)
        for x in writers:
            try:
                x.write(overwrite=True)
            except Exception as e:
                msg = 'Could not write module file [{0}]'
                tty.warn(msg.format(x.layout.filename))
                tty.warn('\t--> {0} <--'.format(str(e)))
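The clash detection above groups writers by target filename with a defaultdict and flags any filename claimed more than once; the same pattern in isolation, with hypothetical names:

import collections

file2writer = collections.defaultdict(list)
for writer, filename in [('a', 'f1'), ('b', 'f1'), ('c', 'f2')]:
    file2writer[filename].append(writer)

clashes = {f: ws for f, ws in file2writer.items() if len(ws) > 1}
print(clashes)  # {'f1': ['a', 'b']}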
Example #42
def validate_section_name(section):
    """Exit if the section is not a valid section."""
    if section not in section_schemas:
        tty.die("Invalid config section: '%s'. Options are: %s" %
                (section, " ".join(section_schemas.keys())))
Example #43
def config_update(args):
    # Read the configuration files
    spack.config.config.get_config(args.section, scope=args.scope)
    updates = spack.config.config.format_updates[args.section]

    cannot_overwrite, skip_system_scope = [], False
    for scope in updates:
        cfg_file = spack.config.config.get_config_filename(
            scope.name, args.section)
        scope_dir = scope.path
        can_be_updated = _can_update_config_file(scope_dir, cfg_file)
        if not can_be_updated:
            if scope.name == 'system':
                skip_system_scope = True
                msg = ('Not enough permissions to write to "system" scope. '
                       'Skipping update at that location [cfg={0}]')
                tty.warn(msg.format(cfg_file))
                continue
            cannot_overwrite.append((scope, cfg_file))

    if cannot_overwrite:
        msg = 'Detected permission issues with the following scopes:\n\n'
        for scope, cfg_file in cannot_overwrite:
            msg += '\t[scope={0}, cfg={1}]\n'.format(scope.name, cfg_file)
        msg += ('\nEither ensure that you have sufficient permissions to '
                'modify these files or do not include these scopes in the '
                'update.')
        tty.die(msg)

    if skip_system_scope:
        updates = [x for x in updates if x.name != 'system']

    # Report if there are no updates to be done
    if not updates:
        msg = 'No updates needed for "{0}" section.'
        tty.msg(msg.format(args.section))
        return

    proceed = True
    if not args.yes_to_all:
        msg = ('The following configuration files are going to be updated to'
               ' the latest schema format:\n\n')
        for scope in updates:
            cfg_file = spack.config.config.get_config_filename(
                scope.name, args.section)
            msg += '\t[scope={0}, file={1}]\n'.format(scope.name, cfg_file)
        msg += ('\nIf the configuration files are updated, versions of Spack '
                'that are older than this version may not be able to read '
                'them. Spack stores backups of the updated files which can '
                'be retrieved with "spack config revert"')
        tty.msg(msg)
        proceed = tty.get_yes_or_no('Do you want to proceed?', default=False)

    if not proceed:
        tty.die('Operation aborted.')

    # Get a function to update the format
    update_fn = spack.config.ensure_latest_format_fn(args.section)
    for scope in updates:
        cfg_file = spack.config.config.get_config_filename(
            scope.name, args.section)
        with open(cfg_file) as f:
            data = syaml.load(f) or {}
            data = data.pop(args.section, {})
        update_fn(data)

        # Make a backup copy and rewrite the file
        bkp_file = cfg_file + '.bkp'
        shutil.copy(cfg_file, bkp_file)
        spack.config.config.update_config(args.section,
                                          data,
                                          scope=scope.name,
                                          force=True)
        msg = 'File "{0}" updated [backup={1}]'
        tty.msg(msg.format(cfg_file, bkp_file))
Example #44
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
                    force=False):
    """
    extract binary tarball for given package into install area
    """
    if os.path.exists(spec.prefix):
        if force:
            shutil.rmtree(spec.prefix)
        else:
            raise NoOverwriteException(str(spec.prefix))

    tmpdir = tempfile.mkdtemp()
    stagepath = os.path.dirname(filename)
    spackfile_name = tarball_name(spec, '.spack')
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
                Gpg.verify('%s.asc' % specfile_path, specfile_path)
            except Exception as e:
                shutil.rmtree(tmpdir)
                tty.die(str(e))
        else:
            shutil.rmtree(tmpdir)
            raise NoVerifyException(
                "Package spec file failed signature verification.\n"
                "Use spack buildcache keys to download "
                "and install a key for verification from the mirror.")
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # get the sha256 checksum recorded at creation
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
    if bchecksum['hash'] != checksum:
        shutil.rmtree(tmpdir)
        raise NoChecksumException(
            "Package tarball failed checksum verification.\n"
            "It cannot be installed.")

    new_relative_prefix = str(os.path.relpath(spec.prefix,
                                              spack.store.layout.root))
    # if the original relative prefix is in the spec file use it
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    # if the original relative prefix and new relative prefix differ, the
    # directory layout has changed and the buildcache cannot be installed
    if old_relative_prefix != new_relative_prefix:
        shutil.rmtree(tmpdir)
        msg = "Package tarball was created from an install "
        msg += "prefix with a different directory layout.\n"
        msg += "It cannot be relocated."
        raise NewLayoutException(msg)

    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        tar.extractall(path=tmpdir)
    # the base of the install prefix is used when creating the tarball
    # so the pathname should be the same now that the directory layout
    # is confirmed
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))

    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)

    try:
        relocate_package(workdir, allow_root)
    except Exception as e:
        shutil.rmtree(workdir)
        tty.die(str(e))
    # Delay creating spec.prefix until verification is complete
    # and any relocation has been done.
    else:
        install_tree(workdir, spec.prefix, symlinks=True)
    finally:
        shutil.rmtree(tmpdir)
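checksum_tarball is not shown in this listing; a plausible minimal version, assuming it streams the tarball through SHA-256 (matching the "sha256 checksum" comments above):

import hashlib

def checksum_tarball(file_path, block_size=65536):
    # Hypothetical sketch: hash the file in chunks so large tarballs
    # need not fit in memory.
    hasher = hashlib.sha256()
    with open(file_path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            hasher.update(block)
    return hasher.hexdigest()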
Example #45
def install_specs(cli_args, kwargs, specs):
    """Do the actual installation.

    Args:
        cli_args (argparse.Namespace): argparse namespace with command arguments
        kwargs (dict):  keyword arguments
        specs (list):  list of (abstract, concrete) spec tuples
    """

    # handle active environment, if any
    env = ev.active_environment()

    try:
        if env:
            specs_to_install = []
            specs_to_add = []
            for abstract, concrete in specs:
                # This won't find specs added to the env since last
                # concretize, therefore should we consider enforcing
                # concretization of the env before allowing to install
                # specs?
                m_spec = env.matching_spec(abstract)

                # If there is any ambiguity in the above call to matching_spec
                # (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing
                # the matches.  Getting to this point means there were either
                # no matches or exactly one match.

                if not m_spec:
                    tty.debug('{0} matched nothing in the env'.format(
                        abstract.name))
                    # no matches in the env
                    if cli_args.no_add:
                        msg = ('You asked to install {0} without adding it ' +
                               '(--no-add), but no such spec exists in ' +
                               'environment').format(abstract.name)
                        tty.die(msg)
                    else:
                        tty.debug('adding {0} as a root'.format(abstract.name))
                        specs_to_add.append((abstract, concrete))

                    continue

                tty.debug('exactly one match for {0} in env -> {1}'.format(
                    m_spec.name, m_spec.dag_hash()))

                if m_spec in env.roots() or cli_args.no_add:
                    # either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
                    tty.debug('just install {0}'.format(m_spec.name))
                    specs_to_install.append(m_spec)
                else:
                    # the single match is not a root (i.e. it's a dependency),
                    # and --no-add was not specified, so we'll add it as a
                    # root before installing
                    tty.debug('add {0} then install it'.format(m_spec.name))
                    specs_to_add.append((abstract, concrete))

            if specs_to_add:
                tty.debug('Adding the following specs as roots:')
                for abstract, concrete in specs_to_add:
                    tty.debug('  {0}'.format(abstract.name))
                    with env.write_transaction():
                        specs_to_install.append(
                            env.concretize_and_add(abstract, concrete))
                        env.write(regenerate=False)

            # Install the validated list of cli specs
            if specs_to_install:
                tty.debug('Installing the following cli specs:')
                for s in specs_to_install:
                    tty.debug('  {0}'.format(s.name))
                env.install_specs(specs_to_install, args=cli_args, **kwargs)
        else:
            installs = [(concrete.package, kwargs) for _, concrete in specs]
            builder = PackageInstaller(installs)
            builder.install()
    except spack.build_environment.InstallError as e:
        if cli_args.show_log_on_error:
            e.print_context()
            if not os.path.exists(e.pkg.build_log_path):
                tty.error("'spack install' created no log.")
            else:
                sys.stderr.write('Full build log:\n')
                with open(e.pkg.build_log_path) as log:
                    shutil.copyfileobj(log, sys.stderr)
        raise
Example #46
def extensions(parser, args):
    if not args.spec:
        # If called without arguments, list all the extendable packages
        isatty = sys.stdout.isatty()
        if isatty:
            tty.info('Extendable packages:')

        extendable_pkgs = []
        for name in spack.repo.all_package_names():
            pkg = spack.repo.get(name)
            if pkg.extendable:
                extendable_pkgs.append(name)

        colify(extendable_pkgs, indent=4)
        return

    # Checks
    spec = cmd.parse_specs(args.spec)
    if len(spec) > 1:
        tty.die("Can only list extensions for one package.")

    if not spec[0].package.extendable:
        tty.die("%s is not an extendable package." % spec[0].name)

    env = ev.active_environment()
    spec = cmd.disambiguate_spec(spec[0], env)

    if not spec.package.extendable:
        tty.die("%s does not have extensions." % spec.short_spec)

    if args.show in ("packages", "all"):
        # List package names of extensions
        extensions = spack.repo.path.extensions_for(spec)
        if not extensions:
            tty.msg("%s has no extensions." % spec.cshort_spec)
        else:
            tty.msg(spec.cshort_spec)
            tty.msg("%d extensions:" % len(extensions))
            colify(ext.name for ext in extensions)

    if args.view:
        target = args.view
    else:
        target = spec.prefix

    view = YamlFilesystemView(target, spack.store.layout)

    if args.show in ("installed", "all"):
        # List specs of installed extensions.
        installed = [
            s.spec for s in spack.store.db.installed_extensions_for(spec)
        ]

        if args.show == "all":
            print()
        if not installed:
            tty.msg("None installed.")
        else:
            tty.msg("%d installed:" % len(installed))
            cmd.display_specs(installed, args)

    if args.show in ("activated", "all"):
        # List specs of activated extensions.
        activated = view.extensions_layout.extension_map(spec)
        if args.show == "all":
            print()
        if not activated:
            tty.msg("None activated.")
        else:
            tty.msg("%d activated:" % len(activated))
            cmd.display_specs(activated.values(), args)
Example No. 47
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally. """
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(ci_project_dir,
                                          pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
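        # URL-encode the project name by encoding a one-entry query string
        # (e.g. 'My Project' -> 'project=My+Project') and slicing off
        # everything up to and including the '='; what remains is the
        # encoded value CDash expects in its submit.php URL.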
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what is our temporary storage mirror: Is it artifacts
    # buildcache?  Or temporary-storage-url-prefix?  In some cases we need to
    # force something or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(temp_storage_url_prefix,
                                            ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror
                or (spack_is_pr_pipeline and not enable_artifacts_mirror
                    and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(env, root_spec, job_spec_pkg_name,
                                           related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To provide logs, cdash reports, etc for developer download/perusal,
    # these things have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too.  So here we
    # need to clean out the artifacts we may have got from upstream jobs.

    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [concrete_env_dir, pipeline_artifacts_dir]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory, and it will
    # also be used in the generated "spack install" command to install the spec
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url, cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(job_spec,
                                           full_hash_match=True,
                                           index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))
        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                 False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild spec

    # Start with spack arguments
    install_args = list(CI_REBUILD_INSTALL_BASE_ARGS)

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id, cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project, cdash_site,
            job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url',
                cdash_upload_url,
                '--cdash-build',
                cdash_build_name,
                '--cdash-site',
                cdash_site,
                '--cdash-buildstamp',
                cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    # Mark install.sh executable; the stat/chmod pair is the programmatic
    # equivalent of 'chmod u+x install.sh'.
    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.
    install_exit_code = 1  # assume failure unless the script reports otherwise
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            broken_spec_details = {
                'broken-spec': {
                    'job-url': get_env_var('CI_JOB_URL'),
                    'pipeline-url': get_env_var('CI_PIPELINE_URL'),
                    'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
                }
            }

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write(syaml.dump(broken_spec_details))
                web_util.push_to_url(empty_file_path,
                                     broken_spec_path,
                                     keep_original=False,
                                     extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # If we got some kind of S3 (access denied or other connection
                # error), the first non boto-specific class in the exception
                # hierarchy is Exception.  Just print a warning and return
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # We generated the "spack install ..." command to "--keep-stage", copy
    # any logs from the staging directory to artifacts now
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create buildcache on remote mirror, either on pr-specific mirror or
    # on the main mirror defined in the gitlab-enabled spack environment
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          buildcache_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 pipeline_mirror_url)

        # If this is a develop pipeline, check if the spec that we just built is
        # on the broken-specs list. If so, remove it.
        if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            just_built_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
            if web_util.url_exists(broken_spec_path):
                tty.msg('Removing {0} from the list of broken specs'.format(
                    broken_spec_path))
                try:
                    web_util.remove_url(broken_spec_path)
                except Exception as err:
                    # If we got some kind of S3 (access denied or other connection
                    # error), the first non boto-specific class in the exception
                    # hierarchy is Exception.  Just print a warning and return
                    msg = 'Error removing {0} from broken specs list: {1}'.format(
                        broken_spec_path, err)
                    tty.warn(msg)

    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # Control characters cause this to be printed in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code
Example No. 48
def get_checksums_for_versions(url_dict, name, **kwargs):
    """Fetches and checksums archives from URLs.

    This function is called by both ``spack checksum`` and ``spack
    create``.  The ``first_stage_function`` argument allows the caller to
    inspect the first downloaded archive, e.g., to determine the build
    system.

    Args:
        url_dict (dict): A dictionary of the form: version -> URL
        name (str): The name of the package
        first_stage_function (typing.Callable): function that takes a Stage and a URL;
            this is run on the stage of the first URL downloaded
        keep_stage (bool): whether to keep staging area when command completes
        batch (bool): whether to ask user how many versions to fetch (false)
            or fetch all versions (true)
        latest (bool): whether to take the latest version (true) or all (false)
        fetch_options (dict): Options used for the fetcher (such as timeout
            or cookies)

    Returns:
        (str): A multi-line string containing versions and corresponding hashes

    """
    batch = kwargs.get('batch', False)
    fetch_options = kwargs.get('fetch_options', None)
    first_stage_function = kwargs.get('first_stage_function', None)
    keep_stage = kwargs.get('keep_stage', False)
    latest = kwargs.get('latest', False)

    sorted_versions = sorted(url_dict.keys(), reverse=True)
    if latest:
        sorted_versions = sorted_versions[:1]

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v in sorted_versions)
    num_ver = len(sorted_versions)

    tty.msg(
        'Found {0} version{1} of {2}:'.format(num_ver,
                                              '' if num_ver == 1 else 's',
                                              name), '',
        *llnl.util.lang.elide_list([
            '{0:{1}}  {2}'.format(str(v), max_len, url_dict[v])
            for v in sorted_versions
        ]))
    print()

    if batch or latest:
        archives_to_fetch = len(sorted_versions)
    else:
        archives_to_fetch = tty.get_number(
            "How many would you like to checksum?", default=1, abort='q')

    if not archives_to_fetch:
        tty.die("Aborted.")

    versions = sorted_versions[:archives_to_fetch]
    urls = [url_dict[v] for v in versions]

    tty.debug('Downloading...')
    version_hashes = []
    i = 0
    errors = []
    for url, version in zip(urls, versions):
        # Wheels should not be expanded during staging
        expand_arg = ''
        if url.endswith('.whl') or '.whl#' in url:
            expand_arg = ', expand=False'
        try:
            if fetch_options:
                url_or_fs = fs.URLFetchStrategy(url,
                                                fetch_options=fetch_options)
            else:
                url_or_fs = url
            with Stage(url_or_fs, keep=keep_stage) as stage:
                # Fetch the archive
                stage.fetch()
                if i == 0 and first_stage_function:
                    # Only run first_stage_function the first time,
                    # no need to run it every time
                    first_stage_function(stage, url)

                # Checksum the archive and add it to the list
                version_hashes.append(
                    (version,
                     spack.util.crypto.checksum(hashlib.sha256,
                                                stage.archive_file)))
                i += 1
        except FailedDownloadError:
            errors.append('Failed to fetch {0}'.format(url))
        except Exception as e:
            tty.msg('Something failed on {0}, skipping.  ({1})'.format(url, e))

    for msg in errors:
        tty.debug(msg)

    if not version_hashes:
        tty.die("Could not fetch any versions for {0}".format(name))

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v, h in version_hashes)

    # Generate the version directives to put in a package.py
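    # Each emitted line has the form:
    #     version('1.2.0',  sha256='<hex digest>')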
    version_lines = "\n".join([
        "    version('{0}', {1}sha256='{2}'{3})".format(
            v, ' ' * (max_len - len(str(v))), h, expand_arg)
        for v, h in version_hashes
    ])

    num_hash = len(version_hashes)
    tty.debug('Checksummed {0} version{1} of {2}:'.format(
        num_hash, '' if num_hash == 1 else 's', name))

    return version_lines
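
For reference, a minimal (hypothetical) call might look like the sketch below; the package name, URLs, and versions are made-up values for illustration:

from spack.version import Version

url_dict = {
    Version('1.2.1'): 'https://example.com/foo-1.2.1.tar.gz',
    Version('1.2.0'): 'https://example.com/foo-1.2.0.tar.gz',
}
# batch=True checksums every listed version without prompting
version_lines = get_checksums_for_versions(url_dict, 'foo', batch=True)
print(version_lines)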
Example No. 49
def _determine_specs_to_mirror(args):
    if args.specs and args.all:
        raise SpackError("Cannot specify specs on command line if you"
                         " chose to mirror all specs with '--all'")
    elif args.file and args.all:
        raise SpackError("Cannot specify specs with a file ('-f') if you"
                         " chose to mirror all specs with '--all'")

    if not args.versions_per_spec:
        num_versions = 1
    elif args.versions_per_spec == 'all':
        num_versions = 'all'
    else:
        try:
            num_versions = int(args.versions_per_spec)
        except ValueError:
            raise SpackError("'--versions-per-spec' must be a number or 'all',"
                             " got '{0}'".format(args.versions_per_spec))

    # try to parse specs from the command line first.
    with spack.concretize.disable_compiler_existence_check():
        specs = spack.cmd.parse_specs(args.specs, concretize=True)

        # If there is a file, parse each line as a spec and add it to the list.
        if args.file:
            if specs:
                tty.die("Cannot pass specs on the command line with --file.")
            specs = _read_specs_from_file(args.file)

        env_specs = None
        if not specs:
            # If nothing is passed, use environment or all if no active env
            if not args.all:
                tty.die(
                    "No packages were specified.",
                    "To mirror all packages, use the '--all' option"
                    " (this will require significant time and space).")

            env = ev.get_env(args, 'mirror')
            if env:
                env_specs = env.all_specs()
            else:
                specs = [Spec(n) for n in spack.repo.all_package_names()]
        else:
            # If the user asked for dependencies, traverse the spec DAG to get them.
            if args.dependencies:
                new_specs = set()
                for spec in specs:
                    spec.concretize()
                    for s in spec.traverse():
                        new_specs.add(s)
                specs = list(new_specs)

            # Skip external specs, as they are already installed
            external_specs = [s for s in specs if s.external]
            specs = [s for s in specs if not s.external]

            for spec in external_specs:
                msg = 'Skipping {0} as it is an external spec.'
                tty.msg(msg.format(spec.cshort_spec))

        if env_specs:
            if args.versions_per_spec:
                tty.warn("Ignoring '--versions-per-spec' for mirroring specs"
                         " in environment.")
            mirror_specs = env_specs
        else:
            if num_versions == 'all':
                mirror_specs = spack.mirror.get_all_versions(specs)
            else:
                mirror_specs = spack.mirror.get_matching_versions(
                    specs, num_versions=num_versions)
            mirror_specs.sort(key=lambda s: (s.name, s.version))

    exclude_specs = []
    if args.exclude_file:
        exclude_specs.extend(_read_specs_from_file(args.exclude_file))
    if args.exclude_specs:
        exclude_specs.extend(
            spack.cmd.parse_specs(str(args.exclude_specs).split()))
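    # Drop every candidate that matches (satisfies) one of the exclude specs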
    if exclude_specs:
        mirror_specs = list(
            x for x in mirror_specs
            if not any(x.satisfies(y, strict=True) for y in exclude_specs))

    return mirror_specs
Example No. 50
File: xl.py Project: nvarini1/spack
 def cxx11_flag(self):
     if self.version < ver('13.1'):
         tty.die("Only xlC 13.1 and above have some c++11 support.")
     else:
         return "-qlanglvl=extended0x"
Example No. 51
def get_uninstall_list(args, specs, env):
    # Get the list of installed specs that match the ones given via the CLI.
    # args.all takes care of the case where '-a' is given on the CLI.
    uninstall_list = find_matching_specs(env, specs, args.all, args.force)

    # Takes care of '-R'
    active_dpts, inactive_dpts = installed_dependents(uninstall_list, env)

    # if we are in the global scope, we complain if you try to remove a
    # spec that's in an environment.  If we're in an environment, we'll
    # just *remove* it from the environment, so we ignore this
    # error when *in* an environment
    spec_envs = dependent_environments(uninstall_list)
    spec_envs = inactive_dependent_environments(spec_envs)

    # Process spec_dependents and update uninstall_list
    has_error = not args.force and (
        (active_dpts and not args.dependents)  # dependents in the current env
        or (not env and spec_envs)  # there are environments that need specs
    )

    # say why each problem spec is needed
    if has_error:
        specs = set(active_dpts)
        if not env:
            specs.update(set(spec_envs))  # environments depend on this

        for i, spec in enumerate(sorted(specs)):
            # space out blocks of reasons
            if i > 0:
                print()

            spec_format = '{name}{@version}{%compiler}{/hash:7}'
            tty.info("Will not uninstall %s" % spec.cformat(spec_format),
                     format='*r')

            dependents = active_dpts.get(spec)
            if dependents:
                print('The following packages depend on it:')
                spack.cmd.display_specs(dependents, **display_args)

            if not env:
                envs = spec_envs.get(spec)
                if envs:
                    print('It is used by the following environments:')
                    colify([e.name for e in envs], indent=4)

        msgs = []
        if active_dpts:
            msgs.append(
                'use `spack uninstall --dependents` to remove dependents too')
        if spec_envs:
            msgs.append('use `spack env remove` to remove from environments')
        print()
        tty.die('There are still dependents.', *msgs)

    elif args.dependents:
        for spec, lst in active_dpts.items():
            uninstall_list.extend(lst)
        uninstall_list = list(set(uninstall_list))

    # only force-remove (don't completely uninstall) specs that still
    # have external dependent envs or pkgs
    removes = set(inactive_dpts)
    if env:
        removes.update(spec_envs)

    # remove anything in removes from the uninstall list
    uninstall_list = set(uninstall_list) - removes

    return uninstall_list, removes
Example No. 52
def main(argv=None):
    """This is the entry point for the Spack command.

    Args:
        argv (list of str or None): command line arguments, NOT including
            the executable name. If None, parses from sys.argv.
    """
    # Create a parser with a simple positional argument first.  We'll
    # lazily load the subcommand(s) we need later. This allows us to
    # avoid loading all the modules from spack.cmd when we don't need
    # them, which reduces startup latency.
    parser = make_argument_parser()
    parser.add_argument('command', nargs=argparse.REMAINDER)
    args, unknown = parser.parse_known_args(argv)

    # Recover stored LD_LIBRARY_PATH variables from spack shell function
    # This is necessary because MacOS System Integrity Protection clears
    # (DY?)LD_LIBRARY_PATH variables on process start.
    # Spack clears these variables before building and installing packages,
    # but needs to know the prior state for commands like `spack load` and
    # `spack env activate` that modify the user environment.
    recovered_vars = ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH',
                      'DYLD_FALLBACK_LIBRARY_PATH')
    for var in recovered_vars:
        stored_var_name = 'SPACK_%s' % var
        if stored_var_name in os.environ:
            os.environ[var] = os.environ[stored_var_name]

    # make spack.config aware of any command line configuration scopes
    if args.config_scopes:
        spack.config.command_line_scopes = args.config_scopes

    # activate an environment if one was specified on the command line
    if not args.no_env:
        env = ev.find_environment(args)
        if env:
            ev.activate(env, args.use_env_repo, add_view=False)

    if args.print_shell_vars:
        print_setup_info(*args.print_shell_vars.split(','))
        return 0

    # Just print help and exit if run with no arguments at all
    no_args = (len(sys.argv) == 1) if argv is None else (len(argv) == 0)
    if no_args:
        parser.print_help()
        return 1

    # -h, -H, and -V are special as they do not require a command, but
    # all the other options do nothing without a command.
    if args.version:
        print(get_version())
        return 0
    elif args.help:
        sys.stdout.write(parser.format_help(level=args.help))
        return 0
    elif not args.command:
        parser.print_help()
        return 1

    try:
        # ensure options on spack command come before everything
        setup_main_options(args)

        # Try to load the particular command the caller asked for.
        cmd_name = args.command[0]
        cmd_name = aliases.get(cmd_name, cmd_name)

        command = parser.add_command(cmd_name)

        # Re-parse with the proper sub-parser added.
        args, unknown = parser.parse_known_args()

        # many operations will fail without a working directory.
        set_working_dir()

        # now we can actually execute the command.
        if args.spack_profile or args.sorted_profile:
            _profile_wrapper(command, parser, args, unknown)
        elif args.pdb:
            import pdb
            pdb.runctx('_invoke_command(command, parser, args, unknown)',
                       globals(), locals())
            return 0
        else:
            return _invoke_command(command, parser, args, unknown)

    except SpackError as e:
        tty.debug(e)
        e.die()  # gracefully die on any SpackErrors

    except Exception as e:
        if spack.config.get('config:debug'):
            raise
        tty.die(e)

    except KeyboardInterrupt:
        if spack.config.get('config:debug'):
            raise
        sys.stderr.write('\n')
        tty.die("Keyboard interrupt.")

    except SystemExit as e:
        if spack.config.get('config:debug'):
            traceback.print_exc()
        return e.code
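
For reference, a console entry point would typically invoke this function as sketched below (Spack's actual bin/spack wrapper does more setup than this):

if __name__ == '__main__':
    sys.exit(main())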
Example No. 53
 def restage(self):
     tty.die("Cannot restage DIY stage.")
Example No. 54
def main(argv=None):
    """This is the entry point for the pymod command.

    Parameters
    ----------
    argv : list of str or None
        command line arguments, NOT including the executable name. If None,
        parses from sys.argv.
    """

    # Pull out the shell from argv
    argv = argv or sys.argv[1:]
    assert len(argv) >= 1
    shell = argv.pop(0)
    shells = ("bash", "csh", "python")
    if shell not in shells:
        raise ValueError("shell argument must by one of {0}".format(shells))

    # Create a parser with a simple positional argument first.  We'll
    # lazily load the subcommand(s) we need later. This allows us to
    # avoid loading all the modules from pymod.command when we don't need
    # them, which reduces startup latency.
    parser = make_argument_parser()
    parser.add_argument("command", nargs=argparse.REMAINDER)
    args, unknown = parser.parse_known_args(argv)
    args.shell = shell

    # Just print help and exit if run with no arguments at all
    no_args = len(argv) == 0
    if no_args:
        with redirect_stdout():
            parser.print_help()
        return 1

    # -h, -H, and -V are special as they do not require a command, but
    # all the other options do nothing without a command.
    if args.version:
        sys.stderr.write(str(pymod.pymod_version) + "\n")
        return 0
    elif args.help:
        message = parser.format_help(level=args.help)
        with redirect_stdout():
            sys.stderr.write(message)
        return 0
    elif not args.command:
        with redirect_stdout():
            parser.print_help()
        return 1

    try:
        # ensure options on pymod command come before everything
        setup_main_options(args)

        # Try to load the particular command the caller asked for.  If there
        # is no module for it, just die.
        cmd_name = args.command[0]
        cmd_name = aliases.get(cmd_name, cmd_name)

        try:
            command = parser.add_command(cmd_name)
        except ImportError:
            # if pymod.config.get('config:debug'):
            #     raise
            tty.die("Unknown command: %s" % args.command[0])

        # Re-parse with the proper sub-parser added.
        args, unknown = parser.parse_known_args(argv)

        # many operations will fail without a working directory.
        set_working_dir()

        # now we can actually execute the command.
        if args.pdb:
            import pdb

            pdb.runctx("_invoke_command(command, parser, args, unknown)",
                       globals(), locals())
            return 0
        else:
            return _invoke_command(command, parser, args, unknown)

    except Exception as e:
        if pymod.config.get("debug"):
            raise
        tty.die(str(e))

    except KeyboardInterrupt:
        sys.stderr.write("\n")
        tty.die("Keyboard interrupt.")

    except SystemExit as e:
        return e.code
Example No. 55
def ensure_access(file=spack.stage_path):
    """Ensure we can access a directory and die with an error if we can't."""
    if not can_access(file):
        tty.die("Insufficient permissions for %s" % file)
Example No. 56
def install(parser, args, **kwargs):
    if args.help_cdash:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog=textwrap.dedent('''\
environment variables:
  SPACK_CDASH_AUTH_TOKEN
                        authentication token to present to CDash
                        '''))
        arguments.add_cdash_args(parser, True)
        parser.print_help()
        return

    reporter = spack.report.collect_info(spack.package.PackageInstaller,
                                         '_install_task', args.log_format,
                                         args)
    if args.log_file:
        reporter.filename = args.log_file

    if not args.spec and not args.specfiles:
        # if there are no args but an active environment
        # then install the packages from it.
        env = ev.get_env(args, 'install')
        if env:
            if not args.only_concrete:
                with env.write_transaction():
                    concretized_specs = env.concretize()
                    ev.display_specs(concretized_specs)

                    # save view regeneration for later, so that we only do it
                    # once, as it can be slow.
                    env.write(regenerate_views=False)

            specs = env.all_specs()
            if not args.log_file and not reporter.filename:
                reporter.filename = default_log_file(specs[0])
            reporter.specs = specs

            tty.msg("Installing environment {0}".format(env.name))
            with reporter:
                env.install_all(args, **kwargs)

            tty.debug("Regenerating environment views for {0}".format(
                env.name))
            with env.write_transaction():
                # It is not strictly required to synchronize view regeneration
                # but doing so can prevent redundant work in the filesystem.
                env.regenerate_views()
            return
        else:
            msg = "install requires a package argument or active environment"
            if 'spack.yaml' in os.listdir(os.getcwd()):
                # There's a spack.yaml file in the working dir, the user may
                # have intended to use that
                msg += "\n\n"
                msg += "Did you mean to install using the `spack.yaml`"
                msg += " in this directory? Try: \n"
                msg += "    spack env activate .\n"
                msg += "    spack install\n"
                msg += "  OR\n"
                msg += "    spack --env . install"
            tty.die(msg)

    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')

    # Parse cli arguments and construct a dictionary
    # that will be passed to the package installer
    update_kwargs_from_args(args, kwargs)

    if args.run_tests:
        tty.warn("Deprecated option: --run-tests: use --test=all instead")

    # 1. Abstract specs from cli
    abstract_specs = spack.cmd.parse_specs(args.spec)
    tests = False
    if args.test == 'all' or args.run_tests:
        tests = True
    elif args.test == 'root':
        tests = [spec.name for spec in abstract_specs]
    kwargs['tests'] = tests

    try:
        specs = spack.cmd.parse_specs(args.spec, concretize=True, tests=tests)
    except SpackError as e:
        tty.debug(e)
        reporter.concretization_report(e.message)
        raise

    # 2. Concrete specs from yaml files
    for file in args.specfiles:
        with open(file, 'r') as f:
            s = spack.spec.Spec.from_yaml(f)

        concretized = s.concretized()
        if concretized.dag_hash() != s.dag_hash():
            msg = 'skipped invalid file "{0}". '
            msg += 'The file does not contain a concrete spec.'
            tty.warn(msg.format(file))
            continue

        abstract_specs.append(s)
        specs.append(concretized)

    if len(specs) == 0:
        tty.die('The `spack install` command requires a spec to install.')

    if not args.log_file and not reporter.filename:
        reporter.filename = default_log_file(specs[0])
    reporter.specs = specs
    with reporter('build'):
        if args.overwrite:

            installed = list(
                filter(lambda x: x, map(spack.store.db.query_one, specs)))
            if not args.yes_to_all:
                display_args = {
                    'long': True,
                    'show_flags': True,
                    'variants': True
                }

                if installed:
                    tty.msg('The following package specs will be '
                            'reinstalled:\n')
                    spack.cmd.display_specs(installed, **display_args)

                not_installed = list(
                    filter(lambda x: x not in installed, specs))
                if not_installed:
                    tty.msg('The following package specs are not installed and'
                            ' the --overwrite flag was given. They will be'
                            ' newly installed:\n')
                    spack.cmd.display_specs(not_installed, **display_args)

                # We have some specs, so one of the above must have been true
                answer = tty.get_yes_or_no('Do you want to proceed?',
                                           default=False)
                if not answer:
                    tty.die('Reinstallation aborted.')

            # overwrite all concrete explicit specs from this build
            kwargs['overwrite'] = [spec.dag_hash() for spec in specs]

        install_specs(args, kwargs, zip(abstract_specs, specs))
Example No. 57
def location(parser, args):
    if args.module_dir:
        print(spack.paths.module_path)
        return

    if args.spack_root:
        print(spack.paths.prefix)
        return

    if args.location_env:
        path = spack.environment.root(args.location_env)
        if not os.path.isdir(path):
            tty.die("no such environment: '%s'" % args.location_env)
        print(path)
        return

    if args.packages:
        print(spack.repo.path.first_repo().root)
        return

    if args.stages:
        print(spack.stage.get_stage_root())
        return

    specs = spack.cmd.parse_specs(args.spec)

    if not specs:
        tty.die("You must supply a spec.")

    if len(specs) != 1:
        tty.die("Too many specs.  Supply only one.")

    # install_dir command matches against installed specs.
    if args.install_dir:
        env = ev.get_env(args, 'location')
        spec = spack.cmd.disambiguate_spec(specs[0], env)
        print(spec.prefix)
        return

    spec = specs[0]

    # Package dir just needs the spec name
    if args.package_dir:
        print(spack.repo.path.dirname_for_package_name(spec.name))
        return

    # Either concretize or filter from already concretized environment
    spec = spack.cmd.matching_spec_from_env(spec)
    pkg = spec.package

    if args.stage_dir:
        print(pkg.stage.path)
        return

    if args.build_dir:
        # Out of source builds have build_directory defined
        if hasattr(pkg, 'build_directory'):
            # build_directory can be either absolute or relative to the stage path
            # in either case os.path.join makes it absolute
            print(
                os.path.normpath(
                    os.path.join(pkg.stage.path, pkg.build_directory)))
            return

        # Otherwise assume in-source builds
        print(pkg.stage.source_path)
        return

    # source dir remains, which requires the spec to be staged
    if not pkg.stage.expanded:
        tty.die(
            "Source directory does not exist yet. "
            "Run this to create it:", "spack stage " + " ".join(args.spec))

    # Default to source dir.
    print(pkg.stage.source_path)
Example No. 58
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
                  allow_root=False, key=None, regenerate_index=False):
    """
    Build a tarball from given spec and put it into the directory structure
    used at the mirror (following <tarball_directory_name>).
    """
    if not spec.concrete:
        raise ValueError('spec must be concrete to build tarball')

    # set up some paths
    build_cache_dir = build_cache_directory(outdir)

    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_dir = os.path.join(build_cache_dir,
                               tarball_directory_name(spec))
    tarfile_path = os.path.join(tarfile_dir, tarfile_name)
    mkdirp(tarfile_dir)
    spackfile_path = os.path.join(
        build_cache_dir, tarball_path_name(spec, '.spack'))
    if os.path.exists(spackfile_path):
        if force:
            os.remove(spackfile_path)
        else:
            raise NoOverwriteException(str(spackfile_path))
    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(build_cache_dir, specfile_name))

    if os.path.exists(specfile_path):
        if force:
            os.remove(specfile_path)
        else:
            raise NoOverwriteException(str(specfile_path))
    # make a copy of the install directory to work with
    workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
    install_tree(spec.prefix, workdir, symlinks=True)

    # create info for later relocation and create tar
    write_buildinfo_file(spec.prefix, workdir, rel=rel)

    # optionally make the paths in the binaries relative to each other
    # in the spack install tree before creating tarball
    if rel:
        try:
            make_package_relative(workdir, spec.prefix, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            tty.die(str(e))
    else:
        try:
            make_package_placeholder(workdir, spec.prefix, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            tty.die(str(e))
    # create compressed tarball of the install prefix
    with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
        tar.add(name='%s' % workdir,
                arcname='%s' % os.path.basename(spec.prefix))
    # remove copy of install directory
    shutil.rmtree(workdir)

    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.yaml
    spec_dict = {}
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
        spec.prefix, spack.store.layout.root)
    spec_dict['buildinfo'] = buildinfo
    spec_dict['full_hash'] = spec.full_hash()

    tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
        spec_dict['full_hash'], spec.name, specfile_path))
    tty.debug(spec.tree())

    with open(specfile_path, 'w') as outfile:
        outfile.write(syaml.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
        sign_tarball(key, force, specfile_path)
    # put tarball, spec and signature files in .spack archive
    with closing(tarfile.open(spackfile_path, 'w')) as tar:
        tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
        tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
        if not unsigned:
            tar.add(name='%s.asc' % specfile_path,
                    arcname='%s.asc' % specfile_name)

    # cleanup file moved to archive
    os.remove(tarfile_path)
    if not unsigned:
        os.remove('%s.asc' % specfile_path)

    # create an index.html for the build_cache directory so specs can be found
    if regenerate_index:
        generate_package_index(build_cache_dir)

    return None
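
A hypothetical invocation, assuming spec is an installed concrete Spec and /path/to/mirror is the local mirror directory:

build_tarball(spec, '/path/to/mirror', force=True, unsigned=True)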
Example No. 59
def get_checksums(url_dict, name, **kwargs):
    """Fetches and checksums archives from URLs.

    This function is called by both ``spack checksum`` and ``spack create``.
    The ``first_stage_function`` kwarg allows ``spack create`` to determine
    things like the build system of the archive.

    Args:
        url_dict (dict): A dictionary of the form: version -> URL
        name (str): The name of the package
        first_stage_function (callable): Function to run on first staging area
        keep_stage (bool): Don't clean up staging area when command completes

    Returns:
        str: A multi-line string containing versions and corresponding hashes
    """
    first_stage_function = kwargs.get('first_stage_function', None)
    keep_stage = kwargs.get('keep_stage', False)

    sorted_versions = sorted(url_dict.keys(), reverse=True)

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v in sorted_versions)
    num_ver = len(sorted_versions)

    tty.msg("Found {0} version{1} of {2}:".format(
            num_ver, '' if num_ver == 1 else 's', name),
            "",
            *spack.cmd.elide_list(
                ["{0:{1}}  {2}".format(str(v), max_len, url_dict[v])
                 for v in sorted_versions]))
    print()

    archives_to_fetch = tty.get_number(
        "How many would you like to checksum?", default=1, abort='q')

    if not archives_to_fetch:
        tty.die("Aborted.")

    versions = sorted_versions[:archives_to_fetch]
    urls = [url_dict[v] for v in versions]

    tty.msg("Downloading...")
    version_hashes = []
    i = 0
    for url, version in zip(urls, versions):
        try:
            with Stage(url, keep=keep_stage) as stage:
                # Fetch the archive
                stage.fetch()
                if i == 0 and first_stage_function:
                    # Only run first_stage_function the first time,
                    # no need to run it every time
                    first_stage_function(stage, url)

                # Checksum the archive and add it to the list
                version_hashes.append((version, spack.util.crypto.checksum(
                    hashlib.md5, stage.archive_file)))
                i += 1
        except FailedDownloadError:
            tty.msg("Failed to fetch {0}".format(url))
        except Exception as e:
            tty.msg("Something failed on {0}, skipping.".format(url),
                    "  ({0})".format(e))

    if not version_hashes:
        tty.die("Could not fetch any versions for {0}".format(name))

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v, h in version_hashes)

    # Generate the version directives to put in a package.py
    version_lines = "\n".join([
        "    version('{0}', {1}'{2}')".format(
            v, ' ' * (max_len - len(str(v))), h) for v, h in version_hashes
    ])

    num_hash = len(version_hashes)
    tty.msg("Checksummed {0} version{1} of {2}".format(
        num_hash, '' if num_hash == 1 else 's', name))

    return version_lines
Example No. 60
def install(parser, args, **kwargs):

    if args.help_cdash:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog=textwrap.dedent('''\
environment variables:
  SPACK_CDASH_AUTH_TOKEN
                        authentication token to present to CDash
                        '''))
        arguments.add_cdash_args(parser, True)
        parser.print_help()
        return

    # The user wants to monitor builds using github.com/spack/spack-monitor
    if args.use_monitor:
        monitor = spack.monitor.get_client(
            host=args.monitor_host,
            prefix=args.monitor_prefix,
            disable_auth=args.monitor_disable_auth,
            tags=args.monitor_tags,
            save_local=args.monitor_save_local,
        )

    reporter = spack.report.collect_info(spack.package.PackageInstaller,
                                         '_install_task', args.log_format,
                                         args)
    if args.log_file:
        reporter.filename = args.log_file

    if args.run_tests:
        tty.warn("Deprecated option: --run-tests: use --test=all instead")

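    # get_tests returns one of three values, stored below as
    # kwargs['tests']: True (run tests for everything), a list of root
    # package names (test only the roots), or False (run no tests).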
    def get_tests(specs):
        if args.test == 'all' or args.run_tests:
            return True
        elif args.test == 'root':
            return [spec.name for spec in specs]
        else:
            return False

    # Merge the relevant CLI arguments into the kwargs dictionary
    # that will be passed to the package installer
    update_kwargs_from_args(args, kwargs)

    if not args.spec and not args.specfiles:
        # If no specs were given but an environment is active,
        # install the packages from the environment.
        env = ev.active_environment()
        if env:
            tests = get_tests(env.user_specs)
            kwargs['tests'] = tests

            if not args.only_concrete:
                with env.write_transaction():
                    concretized_specs = env.concretize(tests=tests)
                    ev.display_specs(concretized_specs)

                    # save view regeneration for later, so that we only do it
                    # once, as it can be slow.
                    env.write(regenerate=False)

            specs = env.all_specs()
            if not args.log_file and not reporter.filename:
                reporter.filename = default_log_file(specs[0])
            reporter.specs = specs

            # Tell the monitor about the specs
            if args.use_monitor and specs:
                monitor.new_configuration(specs)

            tty.msg("Installing environment {0}".format(env.name))
            with reporter('build'):
                env.install_all(**kwargs)

            tty.debug("Regenerating environment views for {0}".format(
                env.name))
            with env.write_transaction():
                # write env to trigger view generation and modulefile
                # generation
                env.write()
            return
        else:
            msg = "install requires a package argument or active environment"
            if 'spack.yaml' in os.listdir(os.getcwd()):
                # There's a spack.yaml file in the working dir, the user may
                # have intended to use that
                msg += "\n\n"
                msg += "Did you mean to install using the `spack.yaml`"
                msg += " in this directory? Try: \n"
                msg += "    spack env activate .\n"
                msg += "    spack install\n"
                msg += "  OR\n"
                msg += "    spack --env . install"
            tty.die(msg)

    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')

    if args.deprecated:
        spack.config.set('config:deprecated', True, scope='command_line')

    # 1. Abstract specs from the CLI
    abstract_specs = spack.cmd.parse_specs(args.spec)
    tests = get_tests(abstract_specs)
    kwargs['tests'] = tests

    # Concretize up front so that concretization failures are still
    # recorded by the reporter
    try:
        specs = spack.cmd.parse_specs(args.spec, concretize=True, tests=tests)
    except SpackError as e:
        tty.debug(e)
        reporter.concretization_report(e.message)
        raise

    # 2. Concrete specs from YAML or JSON files
    for file in args.specfiles:
        with open(file, 'r') as f:
            if file.endswith(('.yaml', '.yml')):
                s = spack.spec.Spec.from_yaml(f)
            else:
                s = spack.spec.Spec.from_json(f)

        concretized = s.concretized()
        if concretized.dag_hash() != s.dag_hash():
            msg = 'Skipped invalid file "{0}". '
            msg += 'The file does not contain a concrete spec.'
            tty.warn(msg.format(file))
            continue

        abstract_specs.append(s)
        specs.append(concretized)

    if len(specs) == 0:
        tty.die('The `spack install` command requires a spec to install.')

    if not args.log_file and not reporter.filename:
        reporter.filename = default_log_file(specs[0])
    reporter.specs = specs
    with reporter('build'):
        if args.overwrite:
            # Keep only the specs that already have an installed match
            installed = [
                x for x in map(spack.store.db.query_one, specs) if x]
            if not args.yes_to_all:
                display_args = {
                    'long': True,
                    'show_flags': True,
                    'variants': True
                }

                if installed:
                    tty.msg('The following package specs will be '
                            'reinstalled:\n')
                    spack.cmd.display_specs(installed, **display_args)

                not_installed = [s for s in specs if s not in installed]
                if not_installed:
                    tty.msg('The following package specs are not installed'
                            ' but the --overwrite flag was given. They'
                            ' will be newly installed:\n')
                    spack.cmd.display_specs(not_installed, **display_args)

                # We have some specs, so one of the above must have been true
                answer = tty.get_yes_or_no('Do you want to proceed?',
                                           default=False)
                if not answer:
                    tty.die('Reinstallation aborted.')

            # overwrite all concrete explicit specs from this build
            kwargs['overwrite'] = [spec.dag_hash() for spec in specs]

        # Update install_args with the monitor args, needed for build task
        kwargs.update({
            "monitor_disable_auth": args.monitor_disable_auth,
            "monitor_keep_going": args.monitor_keep_going,
            "monitor_host": args.monitor_host,
            "use_monitor": args.use_monitor,
            "monitor_prefix": args.monitor_prefix,
        })

        # If we are using the monitor, send the configurations and create
        # the builds. The full_hash is the main package id; the build_hash
        # identifies the others.
        if args.use_monitor and specs:
            monitor.new_configuration(specs)
        install_specs(args, kwargs, zip(abstract_specs, specs))
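
A closing note on this example: ``install_specs`` receives
``zip(abstract_specs, specs)``, which is only safe because the two lists
are built in the same order -- the CLI branch parses the same argument
list twice, and the specfile loop appends to both lists together (or
skips both via ``continue``). A toy sketch of that invariant, with plain
strings standing in for Spec objects (all names here are illustrative):

abstract_specs, specs = [], []
for token in ['zlib', 'hdf5']:               # stand-ins for CLI specs
    abstract_specs.append(token)
    specs.append(token + ' (concretized)')   # stand-in for concretization
for abstract, concrete in zip(abstract_specs, specs):
    print(abstract, '->', concrete)
# zlib -> zlib (concretized)
# hdf5 -> hdf5 (concretized)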