Code example #1
File: info.py Project: LLNL/cram
def info(parser, args):
    if not args.cramfile:
        tty.error("You must specify a file to display with cram info.")

    with closing(CramFile(args.cramfile, 'r')) as cf:
        if args.all:
            write_header(args, cf)
            print()
            print("Job information:")
            for i, job in enumerate(cf):
                print("Job %d:" % i)
                write_job_info(job)

        elif args.job is not None:
            if args.job < 0 or args.job >= len(cf):
                tty.die("No job %d in this cram file." % args.job)
            print "Job %d:" % args.job
            for i, job in enumerate(cf):
                if i == args.job:
                    write_job_info(job)
                    break

        else:
            write_header(args, cf)
            print()
            write_job_summary(args, cf)
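A quick aside on the `with closing(...)` idiom used above: `contextlib.closing` wraps any object that has a `close()` method so it can be used as a context manager, which is why `CramFile` can be opened this way. A minimal standalone sketch (the `Resource` class here is illustrative, not part of cram):

from contextlib import closing

class Resource(object):
    """Illustrative stand-in for CramFile: closable, but not a context manager."""
    def close(self):
        print("resource closed")

with closing(Resource()) as r:
    pass  # closing() guarantees r.close() runs when the block exits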
Code example #2
File: install.py Project: LLNL/spack
def install_spec(cli_args, kwargs, abstract_spec, spec):
    """Do the actual installation."""

    # handle active environment, if any
    def install(spec, kwargs):
        env = ev.get_env(cli_args, 'install', required=False)
        if env:
            env.install(abstract_spec, spec, **kwargs)
            env.write()
        else:
            spec.package.do_install(**kwargs)

    try:
        if cli_args.things_to_install == 'dependencies':
            # Install dependencies as if they were installed
            # for root (explicit=False in the DB)
            kwargs['explicit'] = False
            for s in spec.dependencies():
                install(s, kwargs)
        else:
            kwargs['explicit'] = True
            install(spec, kwargs)

    except spack.build_environment.InstallError as e:
        if cli_args.show_log_on_error:
            e.print_context()
            if not os.path.exists(e.pkg.build_log_path):
                tty.error("'spack install' created no log.")
            else:
                sys.stderr.write('Full build log:\n')
                with open(e.pkg.build_log_path) as log:
                    shutil.copyfileobj(log, sys.stderr)
        raise
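The error path above streams the full build log to stderr with `shutil.copyfileobj`, which copies between two file-like objects in buffered chunks instead of reading the whole file into memory. The same pattern in isolation (the log path is a placeholder):

import shutil
import sys

log_path = 'build.log'  # placeholder path, not a real Spack location
with open(log_path) as log:
    shutil.copyfileobj(log, sys.stderr)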
Code example #3
File: uninstall.py Project: matzke1/spack
def get_uninstall_list(args):
    specs = [any]
    if args.packages:
        specs = spack.cmd.parse_specs(args.packages)

    # Get the list of installed specs that match the ones given via the CLI.
    # Takes care of the case where '-a' is given on the CLI.
    uninstall_list = find_matching_specs(specs, args.all, args.force)

    # Takes care of '-d'
    dependent_list = installed_dependents(uninstall_list)

    # Process dependent_list and update uninstall_list
    has_error = False
    if dependent_list and not args.dependents and not args.force:
        for spec, lst in dependent_list.items():
            tty.error("Will not uninstall %s" % spec.cformat("$_$@$%@$/"))
            print('')
            print('The following packages depend on it:')
            spack.cmd.display_specs(lst, **display_args)
            print('')
            has_error = True
    elif args.dependents:
        for key, lst in dependent_list.items():
            uninstall_list.extend(lst)
        uninstall_list = list(set(uninstall_list))
    if has_error:
        tty.die('Use `spack uninstall --dependents` '
                'to uninstall these dependencies as well.')

    return uninstall_list
Code example #4
File: compiler.py Project: alfredo-gimenez/spack
def compiler_info(args):
    """Print info about all compilers matching a spec."""
    cspec = CompilerSpec(args.compiler_spec)
    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)

    if not compilers:
        tty.error("No compilers match spec %s" % cspec)
    else:
        for c in compilers:
            print str(c.spec) + ":"
            print "\tpaths:"
            for cpath in ['cc', 'cxx', 'f77', 'fc']:
                print "\t\t%s = %s" % (cpath, getattr(c, cpath, None))
            if c.flags:
                print "\tflags:"
                for flag, flag_value in c.flags.iteritems():
                    print "\t\t%s = %s" % (flag, flag_value)
            if len(c.environment) != 0:
                if len(c.environment['set']) != 0:
                    print "\tenvironment:"
                    print "\t    set:"
                    for key, value in c.environment['set'].iteritems():
                        print "\t        %s = %s" % (key, value)
            if c.extra_rpaths:
                print "\tExtra rpaths:"
                for extra_rpath in c.extra_rpaths:
                    print "\t\t%s" % extra_rpath
            print "\tmodules  = %s" % c.modules
            print "\toperating system  = %s" % c.operating_system
Code example #5
File: module.py Project: AaronTHolt/spack
def module_find(mtype, spec_array):
    """Look at all installed packages and see if the spec provided
       matches any.  If it does, check whether there is a module file
       of type <mtype> there, and print out the name that the user
       should type to use that package's module.
    """
    if mtype not in module_types:
        tty.die("Invalid module type: '%s'.  Options are %s." % (mtype, comma_or(module_types)))

    specs = spack.cmd.parse_specs(spec_array)
    if len(specs) > 1:
        tty.die("You can only pass one spec.")
    spec = specs[0]

    specs = [s for s in spack.db.installed_package_specs() if s.satisfies(spec)]
    if len(specs) == 0:
        tty.die("No installed packages match spec %s" % spec)

    if len(specs) > 1:
        tty.error("Multiple matches for spec %s.  Choose one:" % spec)
        for s in specs:
            sys.stderr.write(s.tree(color=True))
        sys.exit(1)

    mt = module_types[mtype]
    mod = mt(specs[0])
    if not os.path.isfile(mod.file_name):
        tty.die("No %s module is installed for %s." % (mtype, spec))

    print(mod.use_name)
Code example #6
File: filesystem_view.py Project: LLNL/spack
    def print_status(self, *specs, **kwargs):
        if kwargs.get("with_dependencies", False):
            specs = set(get_dependencies(specs))

        specs = sorted(specs, key=lambda s: s.name)
        in_view = list(map(self.get_spec, specs))

        for s, v in zip(specs, in_view):
            if not v:
                tty.error(self._croot +
                          'Package not linked: %s' % s.name)
            elif s != v:
                self.print_conflict(v, s, level="warn")

        in_view = list(filter(None, in_view))

        if len(specs) > 0:
            tty.msg("Packages linked in %s:" % self._croot[:-1])

            # avoid circular dependency
            import spack.cmd
            spack.cmd.display_specs(in_view, flags=True, variants=True,
                                    long=self.verbose)
        else:
            tty.warn(self._croot + "No packages found.")
Code example #7
File: url_parse.py Project: alfredo-gimenez/spack
def url_parse(parser, args):
    url = args.url

    ver,  vs, vl = spack.url.parse_version_offset(url, debug=True)
    name, ns, nl = spack.url.parse_name_offset(url, ver, debug=True)
    print()

    tty.msg("Detected:")
    try:
        print_name_and_version(url)
    except spack.url.UrlParseError as e:
        tty.error(str(e))

    print('    name:     %s' % name)
    print('    version:  %s' % ver)

    print()
    tty.msg("Substituting version 9.9.9b:")
    newurl = spack.url.substitute_version(url, '9.9.9b')
    print_name_and_version(newurl)

    if args.spider:
        print()
        tty.msg("Spidering for versions:")
        versions = find_versions_of_archive(url)
        for v in sorted(versions):
            print "%-20s%s" % (v, versions[v])
Code example #8
File: dotkit.py Project: dshrader/spack
def dotkit_find(parser, args):
    if not args.spec:
        parser.parse_args(['dotkit', '-h'])

    spec = spack.cmd.parse_specs(args.spec)
    if len(spec) > 1:
        tty.die("You can only pass one spec.")
    spec = spec[0]

    if not spack.db.exists(spec.name):
        tty.die("No such package: %s" % spec.name)

    specs = [s for s in spack.db.installed_package_specs() if s.satisfies(spec)]

    if len(specs) == 0:
        tty.die("No installed packages match spec %s" % spec)

    if len(specs) > 1:
        tty.error("Multiple matches for spec %s.  Choose one:" % spec)
        for s in specs:
            sys.stderr.write(s.tree(color=True))
        sys.exit(1)

    match = specs[0]
    if not os.path.isfile(spack.hooks.dotkit.dotkit_file(match.package)):
        tty.die("No dotkit is installed for package %s." % spec)

    print(match.format('$_$@$+$%@$=$#'))
Code example #9
File: uninstall.py Project: alfredo-gimenez/spack
def concretize_specs(specs, allow_multiple_matches=False, force=False):
    """Returns a list of specs matching the non necessarily
    concretized specs given from cli

    Args:
        specs: list of specs to be matched against installed packages
        allow_multiple_matches : if True multiple matches are admitted

    Return:
        list of specs
    """
    # List of specs that match expressions given via command line
    specs_from_cli = []
    has_errors = False
    for spec in specs:
        matching = spack.store.db.query(spec)
        # For each spec provided, make sure it refers to only one package.
        # Fail and ask user to be unambiguous if it doesn't
        if not allow_multiple_matches and len(matching) > 1:
            tty.error("%s matches multiple packages:" % spec)
            print()
            spack.cmd.display_specs(matching, **display_args)
            print()
            has_errors = True

        # No installed package matches the query
        if len(matching) == 0 and spec is not any:
            tty.error("%s does not match any installed packages." % spec)
            has_errors = True

        specs_from_cli.extend(matching)
    if has_errors:
        tty.die(error_message)

    return specs_from_cli
Code example #10
File: __init__.py Project: matzke1/spack
def modules_cmd(parser, args, module_type, callbacks=callbacks):

    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        'refresh': {
            'installed': True,
            'known': True
        },
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})

    # Get the specs that match the query from the DB
    specs = args.specs(**query_args)

    try:

        callbacks[args.subparser_name](module_type, specs, args)

    except MultipleSpecsMatch:
        msg = "the constraint '{query}' matches multiple packages:\n"
        for s in specs:
            msg += '\t' + s.cformat(format_string='$/ $_$@$+$%@+$+$=') + '\n'
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501

    except NoSpecMatches:
        msg = "the constraint '{query}' matches no package."
        tty.error(msg.format(query=args.constraint))
        tty.die('In this context exactly **one** match is needed: please specify your constraints better.')  # NOQA: ignore=E501
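`callbacks[args.subparser_name](module_type, specs, args)` dispatches on the subcommand name through a dict of handler functions instead of an if/elif chain. A minimal sketch of the pattern (the handler names here are illustrative, not Spack's):

def refresh_handler(arg):
    print('refreshing', arg)

def rm_handler(arg):
    print('removing', arg)

callbacks = {'refresh': refresh_handler, 'rm': rm_handler}
subcommand = 'refresh'
callbacks[subcommand]('openmpi')  # prints: refreshing openmpi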
Code example #11
File: configure.py Project: alfredo-gimenez/spack
def _stop_at_phase_during_install(args, calling_fn, phase_mapping):
    if not args.package:
        tty.die("configure requires at least one package argument")

    # TODO: to be refactored with code in install
    specs = spack.cmd.parse_specs(args.package, concretize=True)
    if len(specs) != 1:
        tty.error('only one spec can be installed at a time.')
    spec = specs.pop()
    pkg = spec.package
    try:
        key = [cls for cls in phase_mapping if isinstance(pkg, cls)].pop()
        phase = phase_mapping[key]
        # Install package dependencies if needed
        parser = argparse.ArgumentParser()
        inst.setup_parser(parser)
        tty.msg('Checking dependencies for {0}'.format(args.package))
        cli_args = ['-v'] if args.verbose else []
        install_args = parser.parse_args(cli_args + ['--only=dependencies'])
        install_args.package = args.package
        inst.install(parser, install_args)
        # Install package and stop at the given phase
        cli_args = ['-v'] if args.verbose else []
        install_args = parser.parse_args(cli_args + ['--only=package'])
        install_args.package = args.package
        inst.install(parser, install_args, stop_at=phase)
    except IndexError:
        tty.error(
            'Package {0} has no {1} phase, or its {1} phase is not separated from install'.format(  # NOQA: ignore=E501
                spec.name, calling_fn.__name__)
        )
Code example #12
File: error.py Project: LLNL/spack
    def print_context(self):
        """Print extended debug information about this exception.

        This is usually printed when the top-level Spack error handler
        calls ``die()``, but it can be called separately beforehand if a
        lower-level error handler needs to print error context and
        continue without raising the exception to the top level.
        """
        if self.printed:
            return

        # basic debug message
        tty.error(self.message)
        if self.long_message:
            sys.stderr.write(self.long_message)
            sys.stderr.write('\n')

        # stack trace, etc. in debug mode.
        if debug:
            if self.traceback:
                # exception came from a build child, already got
                # traceback in child, so print it.
                sys.stderr.write(self.traceback)
            else:
                # run parent exception hook.
                sys.excepthook(*sys.exc_info())

        sys.stderr.flush()
        self.printed = True
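The `sys.excepthook(*sys.exc_info())` call above prints the full traceback of the exception currently being handled without re-raising it, so execution can continue after the report. A standalone sketch of the idiom:

import sys

try:
    1 / 0
except ZeroDivisionError:
    # Print the traceback via the installed excepthook, then keep going.
    sys.excepthook(*sys.exc_info())
print("still running")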
Code example #13
File: module.py Project: justintoo/spack
def module(parser, args):
    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        'refresh': {
            'installed': True,
            'known': True
        },
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})
    specs = args.specs(**query_args)
    module_type = args.module_type
    constraint = args.constraint
    try:
        callbacks[args.subparser_name](module_type, specs, args)
    except MultipleMatches:
        message = ("the constraint '{query}' matches multiple packages, "
                   "and this is not allowed in this context")
        tty.error(message.format(query=constraint))
        for s in specs:
            sys.stderr.write(s.format(color=True) + '\n')
        raise SystemExit(1)
    except NoMatch:
        message = ("the constraint '{query}' matches no package, "
                   "and this is not allowed in this context")
        tty.die(message.format(query=constraint))
Code example #14
File: mirror.py Project: LLNL/spack
def mirror_create(args):
    """Create a directory to be used as a spack mirror, and fill it with
       package archives."""
    # try to parse specs from the command line first.
    with spack.concretize.concretizer.disable_compiler_existence_check():
        specs = spack.cmd.parse_specs(args.specs, concretize=True)

        # If there is a file, parse each line as a spec and add it to the list.
        if args.file:
            if specs:
                tty.die("Cannot pass specs on the command line with --file.")
            specs = _read_specs_from_file(args.file)

        # If nothing is passed, use all packages.
        if not specs:
            specs = [Spec(n) for n in spack.repo.all_package_names()]
            specs.sort(key=lambda s: s.format("$_$@").lower())

        # If the user asked for dependencies, traverse the spec DAG to get them.
        if args.dependencies:
            new_specs = set()
            for spec in specs:
                spec.concretize()
                for s in spec.traverse():
                    new_specs.add(s)
            specs = list(new_specs)

        # Skip external specs, as they are already installed
        external_specs = [s for s in specs if s.external]
        specs = [s for s in specs if not s.external]

        for spec in external_specs:
            msg = 'Skipping {0} as it is an external spec.'
            tty.msg(msg.format(spec.cshort_spec))

        # Default name for directory is spack-mirror-<DATESTAMP>
        directory = args.directory
        if not directory:
            timestamp = datetime.now().strftime("%Y-%m-%d")
            directory = 'spack-mirror-' + timestamp

        # Make sure nothing is in the way.
        existed = os.path.isdir(directory)

        # Actually do the work to create the mirror
        present, mirrored, error = spack.mirror.create(
            directory, specs, num_versions=args.one_version_per_spec)
        p, m, e = len(present), len(mirrored), len(error)

        verb = "updated" if existed else "created"
        tty.msg(
            "Successfully %s mirror in %s" % (verb, directory),
            "Archive stats:",
            "  %-4d already present"  % p,
            "  %-4d added"            % m,
            "  %-4d failed to fetch." % e)
        if error:
            tty.error("Failed downloads:")
            colify(s.cformat("$_$@") for s in error)
Code example #15
File: install.py Project: justintoo/spack
def install(parser, args, **kwargs):
    if not args.package:
        tty.die("install requires at least one package argument")

    if args.jobs is not None:
        if args.jobs <= 0:
            tty.die("The -j option must be a positive integer!")

    if args.no_checksum:
        spack.do_checksum = False        # TODO: remove this global.

    # Parse cli arguments and construct a dictionary
    # that will be passed to Package.do_install API
    kwargs.update({
        'keep_prefix': args.keep_prefix,
        'keep_stage': args.keep_stage,
        'install_deps': 'dependencies' in args.things_to_install,
        'make_jobs': args.jobs,
        'run_tests': args.run_tests,
        'verbose': args.verbose,
        'fake': args.fake,
        'dirty': args.dirty
    })

    # Spec from cli
    specs = spack.cmd.parse_specs(args.package, concretize=True)
    if len(specs) == 0:
        tty.error('The `spack install` command requires a spec to install.')

    for spec in specs:
        # Check if we were asked to produce some log for dashboards
        if args.log_format is not None:
            # Compute the filename for logging
            log_filename = args.log_file
            if not log_filename:
                log_filename = default_log_file(spec)
            # Create the test suite in which to log results
            test_suite = TestSuite(spec)
            # Decorate PackageBase.do_install to get installation status
            PackageBase.do_install = junit_output(
                spec, test_suite
            )(PackageBase.do_install)

        # Do the actual installation
        if args.things_to_install == 'dependencies':
            # Install dependencies as if they were installed
            # for root (explicit=False in the DB)
            kwargs['explicit'] = False
            for s in spec.dependencies():
                p = spack.repo.get(s)
                p.do_install(**kwargs)
        else:
            package = spack.repo.get(spec)
            kwargs['explicit'] = True
            package.do_install(**kwargs)

        # Dump log file if asked to
        if args.log_format is not None:
            test_suite.dump(log_filename)
Code example #16
File: view.py Project: matzke1/spack
def view(parser, args):
    'Produce a view of a set of packages.'

    specs = spack.cmd.parse_specs(args.specs)
    path = args.path[0]

    view = YamlFilesystemView(
        path, spack.store.layout,
        ignore_conflicts=getattr(args, "ignore_conflicts", False),
        link=os.link if args.action in ["hardlink", "hard"]
        else os.symlink,
        verbose=args.verbose)

    # Process common args and specs
    if getattr(args, "all", False):
        specs = view.get_all_specs()
        if len(specs) == 0:
            tty.warn("Found no specs in %s" % path)

    elif args.action in actions_link:
        # only link commands need to disambiguate specs
        specs = [spack.cmd.disambiguate_spec(s) for s in specs]

    elif args.action in actions_status:
        # no specs implies all
        if len(specs) == 0:
            specs = view.get_all_specs()
        else:
            specs = relaxed_disambiguate(specs, view)

    else:
        # status and remove can map the name to packages in view
        specs = relaxed_disambiguate(specs, view)

    with_dependencies = args.dependencies.lower() in ['true', 'yes']

    # Map action to corresponding functionality
    if args.action in actions_link:
        try:
            view.add_specs(*specs,
                           with_dependencies=with_dependencies,
                           exclude=args.exclude)
        except MergeConflictError:
            tty.info("Some file blocked the merge, adding the '-i' flag will "
                     "ignore this conflict. For more information see e.g. "
                     "https://github.com/spack/spack/issues/9029")
            raise

    elif args.action in actions_remove:
        view.remove_specs(*specs,
                          with_dependencies=with_dependencies,
                          exclude=args.exclude,
                          with_dependents=not args.no_remove_dependents)

    elif args.action in actions_status:
        view.print_status(*specs, with_dependencies=with_dependencies)

    else:
        tty.error('Unknown action: "%s"' % args.action)
Code example #17
File: error.py Project: jgalarowicz/spack
    def die(self):
        if spack.debug:
            sys.excepthook(*sys.exc_info())
            os._exit(1)
        else:
            tty.error(self.message)
            if self.long_message:
                print(self.long_message)
            os._exit(1)
Code example #18
File: uninstall.py Project: trws/spack
def uninstall(parser, args):
    if not args.packages:
        tty.die("uninstall requires at least one package argument.")

    with spack.installed_db.write_transaction():
        specs = spack.cmd.parse_specs(args.packages)

        # For each spec provided, make sure it refers to only one package.
        # Fail and ask user to be unambiguous if it doesn't
        pkgs = []
        for spec in specs:
            matching_specs = spack.installed_db.query(spec)
            if not args.all and len(matching_specs) > 1:
                tty.error("%s matches multiple packages:" % spec)
                print()
                display_specs(matching_specs, long=True)
                print()
                print("You can either:")
                print("  a) Use a more specific spec, or")
                print("  b) use spack uninstall -a to uninstall ALL matching specs.")
                sys.exit(1)

            if len(matching_specs) == 0:
                if args.force: continue
                tty.die("%s does not match any installed packages." % spec)

            for s in matching_specs:
                try:
                    # should work if package is known to spack
                    pkgs.append(s.package)

                except spack.packages.UnknownPackageError as e:
                    # The package.py file has gone away -- but still want to
                    # uninstall.
                    spack.Package(s).do_uninstall(force=True)

        # Sort packages to be uninstalled by the number of installed dependents
        # This ensures we do things in the right order
        def num_installed_deps(pkg):
            return len(pkg.installed_dependents)
        pkgs.sort(key=num_installed_deps)

        # Uninstall packages in order now.
        for pkg in pkgs:
            try:
                pkg.do_uninstall(force=args.force)
            except PackageStillNeededError as e:
                tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
                print()
                print("The following packages depend on it:")
                display_specs(e.dependents, long=True)
                print()
                print("You can use spack uninstall -f to force this action.")
                sys.exit(1)
Code example #19
File: view.py Project: alfredo-gimenez/spack
def view(parser, args):
    'Produce a view of a set of packages.'

    # Process common args
    seeds = [spack.cmd.disambiguate_spec(s) for s in args.specs]
    specs = flatten(seeds, args.dependencies.lower() in ['yes', 'true'])
    specs = filter_exclude(specs, args.exclude)

    # Execute the visitation.
    try:
        visitor = globals()['visitor_' + args.action]
    except KeyError:
        tty.error('Unknown action: "%s"' % args.action)
    visitor(specs, args)
Code example #20
File: compiler.py Project: d-tk/spack
def compiler_info(args):
    """Print info about all compilers matching a spec."""
    cspec = CompilerSpec(args.compiler_spec)
    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)

    if not compilers:
        tty.error("No compilers match spec %s" % cspec)
    else:
        for c in compilers:
            print(str(c.spec) + ":")
            print("\tcc  = %s" % c.cc)
            print("\tcxx = %s" % c.cxx)
            print("\tf77 = %s" % c.f77)
            print("\tfc  = %s" % c.fc)
Code example #21
File: diy.py Project: LLNL/spack
def diy(self, args):
    if not args.spec:
        tty.die("spack diy requires a package spec argument.")

    if args.jobs is not None:
        if args.jobs <= 0:
            tty.die("the -j option must be a positive integer")

    specs = spack.cmd.parse_specs(args.spec)
    if len(specs) > 1:
        tty.die("spack diy only takes one spec.")

    spec = specs[0]
    if not spack.repo.path.exists(spec.name):
        tty.die("No package for '{0}' was found.".format(spec.name),
                "  Use `spack create` to create a new package")

    if not spec.versions.concrete:
        tty.die(
            "spack diy spec must have a single, concrete version. "
            "Did you forget a package version number?")

    spec.concretize()
    package = spack.repo.get(spec)

    if package.installed:
        tty.error("Already installed in %s" % package.prefix)
        tty.msg("Uninstall or try adding a version suffix for this DIY build.")
        sys.exit(1)

    source_path = args.source_path
    if source_path is None:
        source_path = os.getcwd()
    source_path = os.path.abspath(source_path)

    # Forces the build to run out of the current directory.
    package.stage = DIYStage(source_path)

    # disable checksumming if requested
    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')

    package.do_install(
        make_jobs=args.jobs,
        keep_prefix=args.keep_prefix,
        install_deps=not args.ignore_deps,
        verbose=not args.quiet,
        keep_stage=True,   # don't remove source dir for DIY.
        dirty=args.dirty)
Code example #22
File: compiler.py Project: d-tk/spack
def compiler_remove(args):
    cspec = CompilerSpec(args.compiler_spec)
    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)

    if not compilers:
        tty.die("No compilers match spec %s" % cspec)
    elif not args.all and len(compilers) > 1:
        tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
        colify(reversed(sorted([c.spec for c in compilers])), indent=4)
        tty.msg("Or, you can use `spack compiler remove -a` to remove all of them.")
        sys.exit(1)

    for compiler in compilers:
        spack.compilers.remove_compiler_from_config(compiler.spec, scope=args.scope)
        tty.msg("Removed compiler %s" % compiler.spec)
Code example #23
File: error.py Project: justintoo/spack
    def die(self):
        # basic debug message
        tty.error(self.message)
        if self.long_message:
            print(self.long_message)

        # stack trace, etc. in debug mode.
        if spack.debug:
            if self.traceback:
                # exception came from a build child, already got
                # traceback in child, so print it.
                sys.stderr.write(self.traceback)
            else:
                # run parent exception hook.
                sys.excepthook(*sys.exc_info())

        os._exit(1)
Code example #24
File: mirror.py Project: mamelara/spack
def mirror_create(args):
    """Create a directory to be used as a spack mirror, and fill it with
       package archives."""
    # try to parse specs from the command line first.
    specs = spack.cmd.parse_specs(args.specs)

    # If there is a file, parse each line as a spec and add it to the list.
    if args.file:
        if specs:
            tty.die("Cannot pass specs on the command line with --file.")
        specs = _read_specs_from_file(args.file)

    # If nothing is passed, use all packages.
    if not specs:
        specs = [Spec(n) for n in spack.db.all_package_names()]
        specs.sort(key=lambda s: s.format("$_$@").lower())

    # Default name for directory is spack-mirror-<DATESTAMP>
    directory = args.directory
    if not directory:
        timestamp = datetime.now().strftime("%Y-%m-%d")
        directory = "spack-mirror-" + timestamp

    # Make sure nothing is in the way.
    existed = False
    if os.path.isfile(directory):
        tty.error("%s already exists and is a file." % directory)
    elif os.path.isdir(directory):
        existed = True

    # Actually do the work to create the mirror
    present, mirrored, error = spack.mirror.create(directory, specs, num_versions=args.one_version_per_spec)
    p, m, e = len(present), len(mirrored), len(error)

    verb = "updated" if existed else "created"
    tty.msg(
        "Successfully %s mirror in %s." % (verb, directory),
        "Archive stats:",
        "  %-4d already present" % p,
        "  %-4d added" % m,
        "  %-4d failed to fetch." % e,
    )
    if error:
        tty.error("Failed downloads:")
        colify(s.format("$_$@") for s in error)
Code example #25
File: module.py Project: justintoo/spack
def refresh(mtype, specs, args):
    """Regenerate module files for item in specs"""
    # Prompt a message to the user about what is going to change
    if not specs:
        tty.msg('No package matches your query')
        return

    if not args.yes_to_all:
        tty.msg(
            'You are about to regenerate {name} module files for:\n'
            .format(name=mtype))
        spack.cmd.display_specs(specs, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Will not regenerate any module files')

    cls = module_types[mtype]

    # Detect name clashes
    writers = [cls(spec) for spec in specs
               if spack.repo.exists(spec.name)]  # skip unknown packages.
    file2writer = collections.defaultdict(list)
    for item in writers:
        file2writer[item.file_name].append(item)

    if len(file2writer) != len(writers):
        message = 'Name clashes detected in module files:\n'
        for filename, writer_list in file2writer.items():
            if len(writer_list) > 1:
                message += '\nfile: {0}\n'.format(filename)
                for x in writer_list:
                    message += 'spec: {0}\n'.format(x.spec.format(color=True))
        tty.error(message)
        tty.error('Operation aborted')
        raise SystemExit(1)

    # Proceed regenerating module files
    tty.msg('Regenerating {name} module files'.format(name=mtype))
    if os.path.isdir(cls.path) and args.delete_tree:
        shutil.rmtree(cls.path, ignore_errors=False)
    filesystem.mkdirp(cls.path)
    for x in writers:
        x.write(overwrite=True)
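The name-clash check above is a general idiom: group items under a key with `collections.defaultdict(list)`, then any key whose list holds more than one item is a clash. The same idea in isolation, with made-up module file names:

import collections

file_names = ['gcc.lua', 'openmpi.lua', 'gcc.lua']  # made-up names
file2writer = collections.defaultdict(list)
for name in file_names:
    file2writer[name].append(name)

clashes = {f: v for f, v in file2writer.items() if len(v) > 1}
print(clashes)  # {'gcc.lua': ['gcc.lua', 'gcc.lua']}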
Code example #26
File: buildcache.py Project: LLNL/spack
def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False):
    """Returns a list of specs matching the not necessarily
       concretized specs given from cli

    Args:
        specs: list of specs to be matched against buildcaches on mirror
        allow_multiple_matches : if True multiple matches are admitted

    Return:
        list of specs
    """
    # List of specs that match expressions given via command line
    specs_from_cli = []
    has_errors = False
    specs = bindist.get_specs(force)
    for pkg in pkgs:
        matches = []
        tty.msg("buildcache spec(s) matching %s \n" % pkg)
        for spec in sorted(specs):
            if pkg.startswith('/'):
                pkghash = pkg.replace('/', '')
                if spec.dag_hash().startswith(pkghash):
                    matches.append(spec)
            else:
                if spec.satisfies(pkg):
                    matches.append(spec)
        # For each pkg provided, make sure it refers to only one package.
        # Fail and ask user to be unambiguous if it doesn't
        if not allow_multiple_matches and len(matches) > 1:
            tty.error('%s matches multiple downloaded packages:' % pkg)
            for match in matches:
                tty.msg('"%s"' % match.format())
            has_errors = True

        # No downloaded package matches the query
        if len(matches) == 0:
            tty.error('%s does not match any downloaded packages.' % pkg)
            has_errors = True

        specs_from_cli.extend(matches)
    if has_errors:
        tty.die('use one of the matching specs above')

    return specs_from_cli
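The loop above accepts two forms of package argument: one starting with '/' is treated as a DAG-hash prefix, anything else as a spec constraint tested with `satisfies()`. The decision condensed into one hypothetical helper (not a Spack function), assuming a spec object with those two methods:

def spec_matches(pkg_arg, spec):
    # Hypothetical helper condensing the matching logic above.
    if pkg_arg.startswith('/'):
        return spec.dag_hash().startswith(pkg_arg.replace('/', ''))
    return spec.satisfies(pkg_arg)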
Code example #27
File: __init__.py Project: LLNL/cram
def run(names, verbose=False):
    """Run tests with the supplied names.  Names should be a list.  If
       it's empty, run ALL of the tests."""
    verbosity = 1 if not verbose else 2

    # If they didn't provide names of tests to run, then use the default
    # list above.
    if not names:
        names = _test_names
    else:
        for test in names:
            if test not in _test_names:
                tty.error("%s is not a valid test name." % test,
                          "Valid names are:")
                colify(_test_names, indent=4)
                sys.exit(1)

    runner = unittest.TextTestRunner(verbosity=verbosity)

    testsRun = errors = failures = skipped = 0
    for test in names:
        module = _test_module + '.' + test
        print(module, test)
        suite = unittest.defaultTestLoader.loadTestsFromName(module)

        tty.msg("Running test: %s" % test)
        result = runner.run(suite)
        testsRun += result.testsRun
        errors   += len(result.errors)
        failures += len(result.failures)
        skipped  += len(result.skipped)

    succeeded = not errors and not failures
    tty.msg("Tests Complete.",
            "%5d tests run" % testsRun,
            "%5d skipped" % skipped,
            "%5d failures" % failures,
            "%5d errors" % errors)

    if succeeded:
        tty.info("OK", format='g')
    else:
        tty.info("FAIL", format='r')
        sys.exit(1)
Code example #28
File: diy.py Project: Exteris/spack
def diy(self, args):
    if not args.spec:
        tty.die("spack diy requires a package spec argument.")

    specs = spack.cmd.parse_specs(args.spec)
    if len(specs) > 1:
        tty.die("spack diy only takes one spec.")

    # Take a write lock before checking for existence.
    with spack.installed_db.write_transaction():
        spec = specs[0]
        if not spack.repo.exists(spec.name):
            tty.warn("No such package: %s" % spec.name)
            create = tty.get_yes_or_no("Create this package?", default=False)
            if not create:
                tty.msg("Exiting without creating.")
                sys.exit(1)
            else:
                tty.msg("Running 'spack edit -f %s'" % spec.name)
                edit_package(spec.name, spack.repo.first_repo(), None, True)
                return

        if not spec.versions.concrete:
            tty.die("spack diy spec must have a single, concrete version.  Did you forget a package version number?")

        spec.concretize()
        package = spack.repo.get(spec)

        if package.installed:
            tty.error("Already installed in %s" % package.prefix)
            tty.msg("Uninstall or try adding a version suffix for this DIY build.")
            sys.exit(1)

        # Forces the build to run out of the current directory.
        package.stage = DIYStage(os.getcwd())

        # TODO: make this an argument, not a global.
        spack.do_checksum = False

        package.do_install(
            keep_prefix=args.keep_prefix,
            ignore_deps=args.ignore_deps,
            verbose=not args.quiet,
            keep_stage=True)   # don't remove source dir for DIY.
Code example #29
File: uninstall.py Project: LLNL/spack
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
    """Returns a list of specs matching the not necessarily
       concretized specs given on the CLI

    Args:
        env (Environment): active environment, or ``None`` if there is not one
        specs (list): list of specs to be matched against installed packages
        allow_multiple_matches (bool): if True multiple matches are admitted

    Return:
        list of specs
    """
    # constrain uninstall resolution to current environment if one is active
    hashes = env.all_hashes() if env else None

    # List of specs that match expressions given via command line
    specs_from_cli = []
    has_errors = False
    for spec in specs:
        matching = spack.store.db.query(spec, hashes=hashes)
        # For each spec provided, make sure it refers to only one package.
        # Fail and ask user to be unambiguous if it doesn't
        if not allow_multiple_matches and len(matching) > 1:
            tty.error('{0} matches multiple packages:'.format(spec))
            print()
            spack.cmd.display_specs(matching, **display_args)
            print()
            has_errors = True

        # No installed package matches the query
        if len(matching) == 0 and spec is not any:
            if env:
                pkg_type = "packages in environment '%s'" % env.name
            else:
                pkg_type = 'installed packages'
            tty.die('{0} does not match any {1}.'.format(spec, pkg_type))

        specs_from_cli.extend(matching)

    if has_errors:
        tty.die(error_message)

    return specs_from_cli
Code example #30
File: python_version.py Project: jgalarowicz/spack
    def check_python_versions(self, *files):
        # dict version -> filename -> reasons
        all_issues = {}

        for fn in files:
            if fn != '/Users/gamblin2/src/spack/var/spack/packages/vim/package.py':
                continue
            print(fn)

            with open(fn) as pyfile:
                versions = pyqver2.get_versions(pyfile.read())
                for ver, reasons in versions.items():
                    if ver > spack_max_version:
                        if ver not in all_issues:
                            all_issues[ver] = {}
                        all_issues[ver][fn] = reasons

        if all_issues:
            tty.error("Spack must run on Python version %d.%d"
                      % spack_max_version)

        for v in sorted(all_issues.keys(), reverse=True):
            msgs = []
            for fn in sorted(all_issues[v].keys()):
                short_fn = fn
                if fn.startswith(spack.prefix):
                    short_fn = fn[len(spack.prefix):]

                reasons = [r for r in set(all_issues[v][fn]) if r]
                for r in reasons:
                    msgs.append(("%s:%s" % ('spack' + short_fn, r[0]), r[1]))

            tty.error("These files require version %d.%d:" % v)
            maxlen = max(len(f) for f, prob in msgs)
            fmt = "%%-%ds%%s" % (maxlen+3)
            print(fmt % ('File', 'Reason'))
            print(fmt % ('-' * maxlen, '-' * 20))
            for msg in msgs:
                print(fmt % msg)

        self.assertTrue(len(all_issues) == 0)
Code example #31
    try:
        specs = spack.spec.parse(args)
        for spec in specs:
            if concretize:
                spec.concretize()  # implies normalize
            elif normalize:
                spec.normalize()

        return specs

    except spack.parse.ParseError as e:
        tty.error(e.message, e.string, e.pos * " " + "^")
        sys.exit(1)

    except spack.spec.SpecError as e:
        tty.error(e.message)
        sys.exit(1)


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
       replacing intervening elements with '...'.  For example::

           elide_list([1,2,3,4,5,6], 4)

       gives::

           [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list
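Expected behavior, matching the docstring:

print(elide_list([1, 2, 3, 4, 5, 6], 4))  # [1, 2, 3, '...', 6]
print(elide_list([1, 2, 3], 4))           # [1, 2, 3] -- short lists pass through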
Code example #32
def upload_spec(args):
    """Upload a spec to s3 bucket"""
    if not args.spec and not args.spec_yaml:
        tty.error('Cannot upload spec without spec arg or path to spec yaml')
        sys.exit(1)

    if not args.base_dir:
        tty.error('No base directory for buildcache specified')
        sys.exit(1)

    if args.spec:
        try:
            spec = Spec(args.spec)
            spec.concretize()
        except Exception as e:
            tty.debug(e)
            tty.error('Unable to concretize spec from string {0}'.format(
                args.spec))
            sys.exit(1)
    else:
        try:
            with open(args.spec_yaml, 'r') as fd:
                spec = Spec.from_yaml(fd.read())
        except Exception as e:
            tty.debug(e)
            tty.error('Unable to concretize spec from yaml {0}'.format(
                args.spec_yaml))
            sys.exit(1)

    s3, bucket_name = get_s3_session(args.endpoint_url)

    build_cache_dir = bindist.build_cache_relative_path()

    tarball_key = os.path.join(
        build_cache_dir, bindist.tarball_path_name(spec, '.spack'))
    tarball_path = os.path.join(args.base_dir, tarball_key)

    specfile_key = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
    specfile_path = os.path.join(args.base_dir, specfile_key)

    cdashidfile_key = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.cdashid'))
    cdashidfile_path = os.path.join(args.base_dir, cdashidfile_key)

    tty.msg('Uploading {0}'.format(tarball_key))
    s3.meta.client.upload_file(
        tarball_path, bucket_name,
        os.path.join('mirror', tarball_key),
        ExtraArgs={'ACL': 'public-read'})

    tty.msg('Uploading {0}'.format(specfile_key))
    s3.meta.client.upload_file(
        specfile_path, bucket_name,
        os.path.join('mirror', specfile_key),
        ExtraArgs={'ACL': 'public-read'})

    if os.path.exists(cdashidfile_path):
        tty.msg('Uploading {0}'.format(cdashidfile_key))
        s3.meta.client.upload_file(
            cdashidfile_path, bucket_name,
            os.path.join('mirror', cdashidfile_key),
            ExtraArgs={'ACL': 'public-read'})
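`get_s3_session` is not shown in this excerpt. A plausible sketch of what it could look like, assuming boto3 with standard credential discovery; both the function body and the bucket name are assumptions, not code from the project:

import boto3

def get_s3_session(endpoint_url, bucket_name='spack-mirror-bucket'):
    # Hypothetical helper; 'spack-mirror-bucket' is a placeholder name.
    s3 = boto3.resource('s3', endpoint_url=endpoint_url)
    return s3, bucket_name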
Code example #33
def update_index(args):
    """Update the index of an s3 buildcache"""
    s3, bucket_name = get_s3_session(args.endpoint_url)

    bucket = s3.Bucket(bucket_name)
    exists = True

    try:
        s3.meta.client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = e.response['Error']['Code']
        if error_code == '404':
            exists = False

    if not exists:
        tty.error('S3 bucket "{0}" does not exist'.format(bucket_name))
        sys.exit(1)

    build_cache_dir = os.path.join(
        'mirror', bindist.build_cache_relative_path())

    spec_yaml_regex = re.compile('{0}/(.+\\.spec\\.yaml)$'.format(
        build_cache_dir))
    spack_regex = re.compile('{0}/([^/]+)/.+\\.spack$'.format(
        build_cache_dir))

    top_level_keys = set()

    for key in bucket.objects.all():
        m = spec_yaml_regex.search(key.key)
        if m:
            top_level_keys.add(m.group(1))
            print(m.group(1))
            continue

        m = spack_regex.search(key.key)
        if m:
            top_level_keys.add(m.group(1))
            print(m.group(1))
            continue

    index_data = {
        'top_level_keys': top_level_keys,
    }

    env = template_engine.make_environment()
    template_dir = 'misc'
    index_template = os.path.join(template_dir, 'buildcache_index.html')
    t = env.get_template(index_template)
    contents = t.render(index_data)

    index_key = os.path.join(build_cache_dir, 'index.html')

    tty.debug('Generated index:')
    tty.debug(contents)
    tty.debug('Pushing it to {0} -> {1}'.format(bucket_name, index_key))

    s3_obj = s3.Object(bucket_name, index_key)
    s3_obj.put(Body=contents, ACL='public-read')
Code example #34
def copy_fn(args):
    """Copy a buildcache entry and all its files from one mirror, given as
    '--base-dir', to some other mirror, specified as '--destination-url'.
    The specific buildcache entry to be copied from one location to the
    other is identified using the '--spec-file' argument."""
    # TODO: Remove after v0.18.0 release
    msg = ('"spack buildcache copy" is deprecated and will be removed from '
           'Spack starting in v0.19.0')
    warnings.warn(msg)

    if not args.spec_file:
        tty.msg('No spec yaml provided, exiting.')
        sys.exit(1)

    if not args.base_dir:
        tty.msg('No base directory provided, exiting.')
        sys.exit(1)

    if not args.destination_url:
        tty.msg('No destination mirror url provided, exiting.')
        sys.exit(1)

    dest_url = args.destination_url

    if dest_url[0:7] != 'file://' and dest_url[0] != '/':
        tty.msg('Only urls beginning with "file://" or "/" are supported ' +
                'by buildcache copy.')
        sys.exit(1)

    try:
        with open(args.spec_file, 'r') as fd:
            spec = Spec.from_yaml(fd.read())
    except Exception as e:
        tty.debug(e)
        tty.error('Unable to concretize spec from yaml {0}'.format(
            args.spec_file))
        sys.exit(1)

    dest_root_path = dest_url
    if dest_url[0:7] == 'file://':
        dest_root_path = dest_url[7:]

    build_cache_dir = bindist.build_cache_relative_path()

    tarball_rel_path = os.path.join(build_cache_dir,
                                    bindist.tarball_path_name(spec, '.spack'))
    tarball_src_path = os.path.join(args.base_dir, tarball_rel_path)
    tarball_dest_path = os.path.join(dest_root_path, tarball_rel_path)

    specfile_rel_path = os.path.join(build_cache_dir,
                                     bindist.tarball_name(spec, '.spec.json'))
    specfile_src_path = os.path.join(args.base_dir, specfile_rel_path)
    specfile_dest_path = os.path.join(dest_root_path, specfile_rel_path)

    specfile_rel_path_yaml = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
    specfile_src_path_yaml = os.path.join(args.base_dir, specfile_rel_path_yaml)
    specfile_dest_path_yaml = os.path.join(dest_root_path, specfile_rel_path_yaml)

    # Make sure directory structure exists before attempting to copy
    os.makedirs(os.path.dirname(tarball_dest_path))

    # Now copy the specfile and tarball files to the destination mirror
    tty.msg('Copying {0}'.format(tarball_rel_path))
    shutil.copyfile(tarball_src_path, tarball_dest_path)

    tty.msg('Copying {0}'.format(specfile_rel_path))
    shutil.copyfile(specfile_src_path, specfile_dest_path)

    tty.msg('Copying {0}'.format(specfile_rel_path_yaml))
    shutil.copyfile(specfile_src_path_yaml, specfile_dest_path_yaml)
Code example #35
File: main.py Project: vsoch/spack
def main(argv=None):
    """This is the entry point for the Spack command.

    Args:
        argv (list of str or None): command line arguments, NOT including
            the executable name. If None, parses from sys.argv.
    """
    # Create a parser with a simple positional argument first.  We'll
    # lazily load the subcommand(s) we need later. This allows us to
    # avoid loading all the modules from spack.cmd when we don't need
    # them, which reduces startup latency.
    parser = make_argument_parser()
    parser.add_argument('command', nargs=argparse.REMAINDER)
    args, unknown = parser.parse_known_args(argv)

    # Recover stored LD_LIBRARY_PATH variables from spack shell function
    # This is necessary because MacOS System Integrity Protection clears
    # (DY?)LD_LIBRARY_PATH variables on process start.
    # Spack clears these variables before building and installing packages,
    # but needs to know the prior state for commands like `spack load` and
    # `spack env activate` that modify the user environment.
    recovered_vars = (
        'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'DYLD_FALLBACK_LIBRARY_PATH'
    )
    for var in recovered_vars:
        stored_var_name = 'SPACK_%s' % var
        if stored_var_name in os.environ:
            os.environ[var] = os.environ[stored_var_name]

    # make spack.config aware of any command line configuration scopes
    if args.config_scopes:
        spack.config.command_line_scopes = args.config_scopes

    # activate an environment if one was specified on the command line
    if not args.no_env:
        env = ev.find_environment(args)
        if env:
            ev.activate(env, args.use_env_repo, add_view=False)

    if args.print_shell_vars:
        print_setup_info(*args.print_shell_vars.split(','))
        return 0

    # Just print help and exit if run with no arguments at all
    no_args = (len(sys.argv) == 1) if argv is None else (len(argv) == 0)
    if no_args:
        parser.print_help()
        return 1

    # -h, -H, and -V are special as they do not require a command, but
    # all the other options do nothing without a command.
    if args.version:
        print(get_version())
        return 0
    elif args.help:
        sys.stdout.write(parser.format_help(level=args.help))
        return 0
    elif not args.command:
        parser.print_help()
        return 1

    try:
        # ensure options on spack command come before everything
        setup_main_options(args)

        # Try to load the particular command the caller asked for.
        cmd_name = args.command[0]
        cmd_name = aliases.get(cmd_name, cmd_name)

        command = parser.add_command(cmd_name)

        # Re-parse with the proper sub-parser added.
        args, unknown = parser.parse_known_args()

        # many operations will fail without a working directory.
        set_working_dir()

        # now we can actually execute the command.
        if args.spack_profile or args.sorted_profile:
            _profile_wrapper(command, parser, args, unknown)
        elif args.pdb:
            import pdb
            pdb.runctx('_invoke_command(command, parser, args, unknown)',
                       globals(), locals())
            return 0
        else:
            return _invoke_command(command, parser, args, unknown)

    except SpackError as e:
        tty.debug(e)
        e.die()  # gracefully die on any SpackErrors

    except KeyboardInterrupt:
        if spack.config.get('config:debug'):
            raise
        sys.stderr.write('\n')
        tty.error("Keyboard interrupt.")
        return signal.SIGINT.value

    except SystemExit as e:
        if spack.config.get('config:debug'):
            traceback.print_exc()
        return e.code

    except Exception as e:
        if spack.config.get('config:debug'):
            raise
        tty.error(e)
        return 3
Code example #36
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally. """
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(ci_project_dir,
                                          pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what is our temporary storage mirror: Is it artifacts
    # buildcache?  Or temporary-storage-url-prefix?  In some cases we need to
    # force something or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(temp_storage_url_prefix,
                                            ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror
                or (spack_is_pr_pipeline and not enable_artifacts_mirror
                    and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(env, root_spec, job_spec_pkg_name,
                                           related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To make logs, CDash reports, etc. available for developers to download
    # and inspect, they have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too, so here we
    # first clean out any artifacts we may have received from upstream jobs.

    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [concrete_env_dir, pipeline_artifacts_dir]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If a signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory; it is also
    # used by the generated "spack install" command to install the spec.
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url, cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(job_spec,
                                           full_hash_match=True,
                                           index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If the artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))
        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                 False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild the spec.

    # Start with spack arguments
    install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id, cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project, cdash_site,
            job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url',
                cdash_upload_url,
                '--cdash-build',
                cdash_build_name,
                '--cdash-site',
                cdash_site,
                '--cdash-buildstamp',
                cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.
    # Default to a failing exit code so the variable is always bound, even
    # if the script cannot be launched at all.
    install_exit_code = -1
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == 1 and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write('')
                web_util.push_to_url(empty_file_path,
                                     broken_spec_path,
                                     keep_original=False,
                                     extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # If we got some kind of S3 error (access denied or other
                # connection problem), the first non-boto-specific class in
                # the exception hierarchy is Exception.  Just warn and move on.
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # The generated "spack install ..." command used "--keep-stage", so copy
    # any logs from the staging directory into the artifacts now.
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create the buildcache on a remote mirror: either the PR-specific mirror
    # or the main mirror defined in the gitlab-enabled spack environment.
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          buildcache_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 pipeline_mirror_url)
    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # ANSI escape codes cause this to be printed in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code
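
The script writer above wraps every argument in plain double quotes, which breaks if an argument itself contains a quote. A minimal sketch of a safer serializer, assuming Python 3 where shlex.quote is available (the helper name is ours, not Spack's):

import shlex

def to_shell_command(args):
    """Quote each argument so bash reconstructs the original argv exactly."""
    return ' '.join(shlex.quote(str(a)) for a in args)

# to_shell_command(['spack', 'install', '-f', 'my spec.yaml'])
# -> "spack install -f 'my spec.yaml'"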
Code example #37
def config_add(args):
    """Add the given configuration to the specified config scope

    This is a stateful operation that edits the config files."""
    if not (args.file or args.path):
        tty.error("No changes requested. Specify a file or value.")
        setup_parser.add_parser.print_help()
        exit(1)

    scope, section = _get_scope_and_section(args)

    # Updates from file
    if args.file:
        # Get file as config dict
        data = spack.config.read_config_file(args.file)
        if any(k in data for k in spack.schema.env.keys):
            data = ev.config_dict(data)

        # Update all sections from the config dict; we have to iterate over
        # keys so that overrides from the file are preserved.
        for section in data.keys():
            if section in spack.config.section_schemas.keys():
                # Special handling for compiler scope difference
                # Has to be handled after we choose a section
                if scope is None:
                    scope = spack.config.default_modify_scope(section)

                value = data[section]
                existing = spack.config.get(section, scope=scope)
                new = spack.config.merge_yaml(existing, value)

                spack.config.set(section, new, scope)

    if args.path:
        components = spack.config.process_config_path(args.path)

        has_existing_value = True
        path = ''
        override = False
        for idx, name in enumerate(components[:-1]):
            # First handle double colons in constructing path
            colon = '::' if override else ':' if path else ''
            path += colon + name
            if getattr(name, 'override', False):
                override = True
            else:
                override = False

            # Test whether there is an existing value at this level
            existing = spack.config.get(path, scope=scope)

            if existing is None:
                has_existing_value = False
                # We've nested further than existing config, so we need the
                # type information for validation to know how to handle bare
                # values appended to lists.
                existing = spack.config.get_valid_type(path)

                # construct value from this point down
                value = syaml.load_config(components[-1])
                for component in reversed(components[idx + 1:-1]):
                    value = {component: value}
                break

        if has_existing_value:
            path, _, value = args.path.rpartition(':')
            value = syaml.load_config(value)
            existing = spack.config.get(path, scope=scope)

        # append values to lists
        if isinstance(existing, list) and not isinstance(value, list):
            value = [value]

        # merge value into existing
        new = spack.config.merge_yaml(existing, value)
        spack.config.set(path, new, scope)
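
config_add leans on spack.config.merge_yaml to fold the new value into the existing configuration. A minimal sketch of that kind of recursive merge (ours, not Spack's actual implementation, which additionally handles '::' overrides and list ordering):

def merge_sketch(dest, source):
    """Recursively merge source into dest; source wins on scalar conflicts.

    Nested dicts are merged key by key; lists are concatenated.
    """
    if isinstance(dest, dict) and isinstance(source, dict):
        for key, value in source.items():
            dest[key] = merge_sketch(dest[key], value) if key in dest else value
        return dest
    if isinstance(dest, list) and isinstance(source, list):
        return dest + source
    return source

# merge_sketch({'config': {'build_jobs': 4}}, {'config': {'ccache': True}})
# -> {'config': {'build_jobs': 4, 'ccache': True}}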
Code example #38
def buildcache_copy(args):
    """Copy a buildcache entry and all its files from one mirror, given as
    '--base-dir', to some other mirror, specified as '--destination-url'.
    The specific buildcache entry to be copied from one location to the
    other is identified using the '--spec-yaml' argument."""
    # TODO: This sub-command should go away once #11117 is merged

    if not args.spec_yaml:
        tty.msg('No spec yaml provided, exiting.')
        sys.exit(1)

    if not args.base_dir:
        tty.msg('No base directory provided, exiting.')
        sys.exit(1)

    if not args.destination_url:
        tty.msg('No destination mirror url provided, exiting.')
        sys.exit(1)

    dest_url = args.destination_url

    if dest_url[0:7] != 'file://' and dest_url[0] != '/':
        tty.msg('Only urls beginning with "file://" or "/" are supported ' +
                'by buildcache copy.')
        sys.exit(1)

    try:
        with open(args.spec_yaml, 'r') as fd:
            spec = Spec.from_yaml(fd.read())
    except Exception as e:
        tty.debug(e)
        tty.error('Unable to concretize spec from yaml {0}'.format(
            args.spec_yaml))
        sys.exit(1)

    dest_root_path = dest_url
    if dest_url[0:7] == 'file://':
        dest_root_path = dest_url[7:]

    build_cache_dir = bindist.build_cache_relative_path()

    tarball_rel_path = os.path.join(build_cache_dir,
                                    bindist.tarball_path_name(spec, '.spack'))
    tarball_src_path = os.path.join(args.base_dir, tarball_rel_path)
    tarball_dest_path = os.path.join(dest_root_path, tarball_rel_path)

    specfile_rel_path = os.path.join(build_cache_dir,
                                     bindist.tarball_name(spec, '.spec.yaml'))
    specfile_src_path = os.path.join(args.base_dir, specfile_rel_path)
    specfile_dest_path = os.path.join(dest_root_path, specfile_rel_path)

    cdashidfile_rel_path = os.path.join(build_cache_dir,
                                        bindist.tarball_name(spec, '.cdashid'))
    cdashid_src_path = os.path.join(args.base_dir, cdashidfile_rel_path)
    cdashid_dest_path = os.path.join(dest_root_path, cdashidfile_rel_path)

    # Make sure the directory structure exists before attempting to copy
    dest_dir = os.path.dirname(tarball_dest_path)
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)

    # Now copy the specfile and tarball files to the destination mirror
    tty.msg('Copying {0}'.format(tarball_rel_path))
    shutil.copyfile(tarball_src_path, tarball_dest_path)

    tty.msg('Copying {0}'.format(specfile_rel_path))
    shutil.copyfile(specfile_src_path, specfile_dest_path)

    # Copy the cdashid file (if exists) to the destination mirror
    if os.path.exists(cdashid_src_path):
        tty.msg('Copying {0}'.format(cdashidfile_rel_path))
        shutil.copyfile(cdashid_src_path, cdashid_dest_path)
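
A hypothetical invocation of the function above (the values are placeholders; in practice argparse fills this namespace from the CLI flags named in the docstring):

import argparse

args = argparse.Namespace(
    spec_yaml='zlib-1.2.11.spec.yaml',       # identifies the entry to copy
    base_dir='/mirrors/source-mirror',        # mirror to copy from
    destination_url='file:///mirrors/dest',   # mirror to copy to
)
buildcache_copy(args)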
Code example #39
    def _writer_daemon(self, stdin):
        """Daemon that writes output to the log file and stdout."""
        # Use line buffering (3rd param = 1) since Python 3 has a bug
        # that prevents unbuffered text I/O.
        in_pipe = os.fdopen(self.read_fd, 'r', 1)
        os.close(self.write_fd)

        echo = self.echo        # initial echo setting, user-controllable
        force_echo = False      # parent can force echo for certain output

        # list of streams to select from
        istreams = [in_pipe, stdin] if stdin else [in_pipe]

        log_file = self.log_file
        try:
            with keyboard_input(stdin):
                while True:
                    # No need to set any timeout for select.select
                    # Wait until a key press or an event on in_pipe.
                    rlist, _, _ = select.select(istreams, [], [])

                    # Allow user to toggle echo with 'v' key.
                    # Currently ignores other chars.
                    if stdin in rlist:
                        if stdin.read(1) == 'v':
                            echo = not echo

                    # Handle output from the with block process.
                    if in_pipe in rlist:
                        # in_pipe was ready for reading; an empty string
                        # means the write end was closed (EOF), in which
                        # case we stop reading.
                        line = in_pipe.readline()
                        if not line:
                            break  # EOF

                        # find control characters and strip them.
                        controls = control.findall(line)
                        line = re.sub(control, '', line)

                        # Echo to stdout if requested or forced
                        if echo or force_echo:
                            sys.stdout.write(line)
                            sys.stdout.flush()

                        # Stripped output to log file.
                        log_file.write(_strip(line))
                        log_file.flush()

                        if xon in controls:
                            force_echo = True
                        if xoff in controls:
                            force_echo = False
        except BaseException:
            tty.error("Exception occurred in writer daemon!")
            traceback.print_exc()

        finally:
            # send written data back to parent if we used a StringIO
            if self.write_log_in_parent:
                self.child.send(log_file.getvalue())
            log_file.close()

        # send echo value back to the parent so it can be preserved.
        self.child.send(echo)
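
The force_echo toggling relies on in-band control characters: the parent brackets output that must always reach the terminal with an "echo on" marker and an "echo off" marker. A minimal sketch of the sending side, assuming the markers are ASCII DC1/DC3 as the xon/xoff names suggest (the helper is ours):

XON, XOFF = '\x11', '\x13'   # ASCII DC1 / DC3, assumed values of xon / xoff

def write_forced(stream, message):
    """Bracket a message so the daemon echoes it even when echo is off."""
    stream.write(XON + message + XOFF)
    stream.flush()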
Code example #40
def install_specs(cli_args, kwargs, specs):
    """Do the actual installation.

    Args:
        cli_args (argparse.Namespace): argparse namespace with command arguments
        kwargs (dict):  keyword arguments
        specs (list):  list of (abstract, concrete) spec tuples
    """

    # handle active environment, if any
    env = ev.active_environment()

    try:
        if env:
            specs_to_install = []
            specs_to_add = []
            for abstract, concrete in specs:
                # This won't find specs added to the env since the last
                # concretize; should we therefore consider enforcing
                # concretization of the env before allowing specs to be
                # installed?
                m_spec = env.matching_spec(abstract)

                # If there is any ambiguity in the above call to matching_spec
                # (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing
                # the matches.  Getting to this point means there were either
                # no matches or exactly one match.

                if not m_spec:
                    tty.debug('{0} matched nothing in the env'.format(
                        abstract.name))
                    # no matches in the env
                    if cli_args.no_add:
                        msg = ('You asked to install {0} without adding it ' +
                               '(--no-add), but no such spec exists in ' +
                               'the environment').format(abstract.name)
                        tty.die(msg)
                    else:
                        tty.debug('adding {0} as a root'.format(abstract.name))
                        specs_to_add.append((abstract, concrete))

                    continue

                tty.debug('exactly one match for {0} in env -> {1}'.format(
                    m_spec.name, m_spec.dag_hash()))

                if m_spec in env.roots() or cli_args.no_add:
                    # either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
                    tty.debug('just install {0}'.format(m_spec.name))
                    specs_to_install.append(m_spec)
                else:
                    # the single match is not a root (i.e. it's a dependency),
                    # and --no-add was not specified, so we'll add it as a
                    # root before installing
                    tty.debug('add {0} then install it'.format(m_spec.name))
                    specs_to_add.append((abstract, concrete))

            if specs_to_add:
                tty.debug('Adding the following specs as roots:')
                for abstract, concrete in specs_to_add:
                    tty.debug('  {0}'.format(abstract.name))
                    with env.write_transaction():
                        specs_to_install.append(
                            env.concretize_and_add(abstract, concrete))
                        env.write(regenerate=False)

            # Install the validated list of cli specs
            if specs_to_install:
                tty.debug('Installing the following cli specs:')
                for s in specs_to_install:
                    tty.debug('  {0}'.format(s.name))
                env.install_specs(specs_to_install, args=cli_args, **kwargs)
        else:
            installs = [(concrete.package, kwargs) for _, concrete in specs]
            builder = PackageInstaller(installs)
            builder.install()
    except spack.build_environment.InstallError as e:
        if cli_args.show_log_on_error:
            e.print_context()
            if not os.path.exists(e.pkg.build_log_path):
                tty.error("'spack install' created no log.")
            else:
                sys.stderr.write('Full build log:\n')
                with open(e.pkg.build_log_path) as log:
                    shutil.copyfileobj(log, sys.stderr)
        raise
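
The (abstract, concrete) tuples are typically assembled by the surrounding "spack install" command. A simplified sketch of that call site, assuming cli_args and kwargs come from the command's argument parsing:

import spack.cmd

abstract_specs = spack.cmd.parse_specs(['zlib@1.2.11'])
concrete_specs = spack.cmd.parse_specs(['zlib@1.2.11'], concretize=True)
install_specs(cli_args, kwargs, list(zip(abstract_specs, concrete_specs)))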
Code example #41
File: style.py Project: vmiheer/spack
def style(parser, args):
    # ensure python version is new enough
    if sys.version_info < (3, 6):
        tty.die("spack style requires Python 3.6 or later.")

    # save initial working directory for relativizing paths later
    args.initial_working_dir = os.getcwd()

    # ensure that the config files we need actually exist in the spack prefix.
    # assertions because users should never see these errors -- they're
    # checked in CI.
    assert os.path.isfile(os.path.join(spack.paths.prefix, "pyproject.toml"))
    assert os.path.isfile(os.path.join(spack.paths.prefix, ".flake8"))

    # validate spack root if the user provided one
    args.root = os.path.realpath(
        args.root) if args.root else spack.paths.prefix
    spack_script = os.path.join(args.root, "bin", "spack")
    if not os.path.exists(spack_script):
        tty.die("This does not look like a valid spack root.",
                "No such file: '%s'" % spack_script)

    file_list = args.files
    if file_list:

        def prefix_relative(path):
            return os.path.relpath(os.path.abspath(os.path.realpath(path)),
                                   args.root)

        file_list = [prefix_relative(p) for p in file_list]

    return_code = 0
    with working_dir(args.root):
        if not file_list:
            file_list = changed_files(args.base, args.untracked, args.all)
        print_style_header(file_list, args)

        commands = {}
        with spack.bootstrap.ensure_bootstrap_configuration():
            for tool_name, bootstrap_fn in tool_order:
                # Skip the tool if it was not requested
                if not getattr(args, tool_name):
                    continue

                commands[tool_name] = bootstrap_fn()

            for tool_name, bootstrap_fn in tool_order:
                # Skip the tool if it was not requested
                if not getattr(args, tool_name):
                    continue

                run_function, required = tools[tool_name]
                print_tool_header(tool_name)
                return_code |= run_function(commands[tool_name], file_list,
                                            args)

    if return_code == 0:
        tty.msg(color.colorize("@*{spack style checks were clean}"))
    else:
        tty.error(color.colorize("@*{spack style found errors}"))

    return return_code
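
changed_files (defined elsewhere in the module) narrows the run to files touched relative to a base ref. A minimal sketch of the same idea, assuming git is on PATH (the function name is ours):

import subprocess

def changed_files_sketch(base='develop'):
    """List files added/changed/renamed relative to `base`, per git."""
    out = subprocess.check_output(
        ['git', 'diff', '--name-only', '--diff-filter=ACMR', base],
        universal_newlines=True)
    return [line for line in out.splitlines() if line]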
Code example #42
def refresh(module_type, specs, args):
    """Regenerates the module files for every spec in specs and every module
    type in module types.
    """
    check_module_set_name(args.module_set_name)

    # Print a message to the user about what is going to change
    if not specs:
        tty.msg('No package matches your query')
        return

    if not args.upstream_modules:
        specs = list(s for s in specs if not s.package.installed_upstream)

    if not args.yes_to_all:
        msg = 'You are about to regenerate {types} module files for:\n'
        tty.msg(msg.format(types=module_type))
        spack.cmd.display_specs(specs, long=True)
        print('')
        answer = tty.get_yes_or_no('Do you want to proceed?')
        if not answer:
            tty.die('Module file regeneration aborted.')

    # Regenerate the module files for the requested module type

    cls = spack.modules.module_types[module_type]

    # Skip unknown packages.
    writers = [
        cls(spec, args.module_set_name) for spec in specs
        if spack.repo.path.exists(spec.name)
    ]

    # Filter blacklisted packages early
    writers = [x for x in writers if not x.conf.blacklisted]

    # Detect name clashes in module files
    file2writer = collections.defaultdict(list)
    for item in writers:
        file2writer[item.layout.filename].append(item)

    if len(file2writer) != len(writers):
        message = 'Name clashes detected in module files:\n'
        for filename, writer_list in file2writer.items():
            if len(writer_list) > 1:
                message += '\nfile: {0}\n'.format(filename)
                for x in writer_list:
                    message += 'spec: {0}\n'.format(x.spec.format())
        tty.error(message)
        tty.error('Operation aborted')
        raise SystemExit(1)

    if len(writers) == 0:
        msg = 'Nothing to be done for {0} module files.'
        tty.msg(msg.format(module_type))
        return
    # If we arrived here we have at least one writer
    module_type_root = writers[0].layout.dirname()

    # Proceed regenerating module files
    tty.msg('Regenerating {name} module files'.format(name=module_type))
    if os.path.isdir(module_type_root) and args.delete_tree:
        shutil.rmtree(module_type_root, ignore_errors=False)
    filesystem.mkdirp(module_type_root)

    # Dump module index after potentially removing module tree
    spack.modules.common.generate_module_index(module_type_root,
                                               writers,
                                               overwrite=args.delete_tree)
    for x in writers:
        try:
            x.write(overwrite=True)
        except Exception as e:
            tty.debug(e)
            msg = 'Could not write module file [{0}]'
            tty.warn(msg.format(x.layout.filename))
            tty.warn('\t--> {0} <--'.format(str(e)))
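
A hypothetical programmatic call, with the namespace fields mirroring the flags the function reads above (values are placeholders; `specs` would come from a query such as spack.cmd.parse_specs):

import argparse

args = argparse.Namespace(
    module_set_name='default',
    upstream_modules=False,   # skip specs installed upstream
    yes_to_all=True,          # skip the interactive confirmation
    delete_tree=False,        # regenerate in place instead of wiping the tree
)
refresh('tcl', specs, args)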
Code example #43
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
                   log_file_wrapper, control_pipe, filter_fn):
    """Daemon used by ``log_output`` to write to a log file and to ``stdout``.

    The daemon receives output from the parent process and writes it both
    to a log and, optionally, to ``stdout``.  The relationship looks like
    this::

        Terminal
           |
           |          +-------------------------+
           |          | Parent Process          |
           +--------> |   with log_output():    |
           | stdin    |     ...                 |
           |          +-------------------------+
           |            ^             | write_fd (parent's redirected stdout)
           |            | control     |
           |            | pipe        |
           |            |             v read_fd
           |          +-------------------------+   stdout
           |          | Writer daemon           |------------>
           +--------> |   read from read_fd     |   log_file
             stdin    |   write to out and log  |------------>
                      +-------------------------+

    Within the ``log_output`` handler, the parent's output is redirected
    to a pipe from which the daemon reads.  The daemon writes each line
    from the pipe to a log file and (optionally) to ``stdout``.  The user
    can hit ``v`` to toggle output on ``stdout``.

    In addition to the input and output file descriptors, the daemon
    interacts with the parent via ``control_pipe``.  It reports whether
    ``stdout`` was enabled or disabled when it finished and, if the
    ``log_file`` is a ``StringIO`` object, then the daemon also sends the
    logged output back to the parent as a string, to be written to the
    ``StringIO`` in the parent. This is mainly for testing.

    Arguments:
        stdin_multiprocess_fd (int): input from the terminal
        read_multiprocess_fd (int): pipe for reading from parent's redirected
            stdout
        write_fd (int): parent's copy of the write end of the pipe; closed
            here (when inherited via fork) so that EOF can be detected
        echo (bool): initial echo setting -- controlled by user and
            preserved across multiple writer daemons
        log_file_wrapper (FileWrapper): file to log all output
        control_pipe (Pipe): multiprocessing pipe on which to send control
            information to the parent
        filter_fn (callable, optional): function to filter each line of output

    """
    # If this process was forked, then it will inherit file descriptors from
    # the parent process. This process depends on closing all instances of
    # write_fd to terminate the reading loop, so we close the file descriptor
    # here. Forking is the process spawning method everywhere except on
    # Windows and on Mac OS for Python >= 3.8.
    if sys.version_info < (3, 8) or sys.platform != 'darwin':
        os.close(write_fd)

    # Use line buffering (3rd param = 1) since Python 3 has a bug
    # that prevents unbuffered text I/O.
    if sys.version_info < (3,):
        in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
    else:
        # Python 3.x before 3.7 does not open with UTF-8 encoding by default
        in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')

    if stdin_multiprocess_fd:
        stdin = os.fdopen(stdin_multiprocess_fd.fd)
    else:
        stdin = None

    # list of streams to select from
    istreams = [in_pipe, stdin] if stdin else [in_pipe]
    force_echo = False      # parent can force echo for certain output

    log_file = log_file_wrapper.unwrap()

    try:
        with keyboard_input(stdin) as kb:
            while True:
                # fix the terminal settings if we recently came to
                # the foreground
                kb.check_fg_bg()

                # wait for input from any stream. use a coarse timeout to
                # allow other checks while we wait for input
                rlist, _, _ = _retry(select.select)(istreams, [], [], 1e-1)

                # Allow user to toggle echo with 'v' key.
                # Currently ignores other chars.
                # only read stdin if we're in the foreground
                if stdin in rlist and not _is_background_tty(stdin):
                    # it's possible to be backgrounded between the above
                    # check and the read, so we ignore SIGTTIN here.
                    with ignore_signal(signal.SIGTTIN):
                        try:
                            if stdin.read(1) == 'v':
                                echo = not echo
                        except IOError as e:
                            # If SIGTTIN is ignored, the system gives EIO
                            # to let the caller know the read failed b/c it
                            # was in the bg. Ignore that too.
                            if e.errno != errno.EIO:
                                raise

                if in_pipe in rlist:
                    line_count = 0
                    try:
                        while line_count < 100:
                            # Handle output from the calling process.
                            try:
                                line = _retry(in_pipe.readline)()
                            except UnicodeDecodeError:
                                # installs like --test=root gpgme produce non-UTF8 logs
                                line = '<line lost: output was not encoded as UTF-8>\n'

                            if not line:
                                return
                            line_count += 1

                            # find control characters and strip them.
                            clean_line, num_controls = control.subn('', line)

                            # Echo to stdout if requested or forced.
                            if echo or force_echo:
                                output_line = clean_line
                                if filter_fn:
                                    output_line = filter_fn(clean_line)
                                sys.stdout.write(output_line)

                            # Stripped output to log file.
                            log_file.write(_strip(clean_line))

                            if num_controls > 0:
                                controls = control.findall(line)
                                if xon in controls:
                                    force_echo = True
                                if xoff in controls:
                                    force_echo = False

                            if not _input_available(in_pipe):
                                break
                    finally:
                        if line_count > 0:
                            if echo or force_echo:
                                sys.stdout.flush()
                            log_file.flush()

    except BaseException:
        tty.error("Exception occurred in writer daemon!")
        traceback.print_exc()

    finally:
        # send written data back to parent if we used a StringIO
        if isinstance(log_file, StringIO):
            control_pipe.send(log_file.getvalue())
        log_file_wrapper.close()
        close_connection_and_file(read_multiprocess_fd, in_pipe)
        if stdin_multiprocess_fd:
            close_connection_and_file(stdin_multiprocess_fd, stdin)

        # send echo value back to the parent so it can be preserved.
        control_pipe.send(echo)
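
_retry (defined elsewhere in the module) wraps calls such as select.select so that they survive signals. A minimal sketch under Python 3 semantics, where select.error is an alias of OSError and an interrupted call raises with errno EINTR (on 3.5+, PEP 475 already retries most such calls automatically); the name is ours:

import errno

def retry_on_eintr(function):
    """Re-invoke `function` for as long as it fails with EINTR."""
    def wrapped(*args, **kwargs):
        while True:
            try:
                return function(*args, **kwargs)
            except OSError as e:
                if e.errno == errno.EINTR:
                    continue
                raise
    return wrapped

# rlist, _, _ = retry_on_eintr(select.select)(istreams, [], [], 0.1)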
Code example #44
def mirror_create(args):
    """Create a directory to be used as a spack mirror, and fill it with
       package archives."""
    # try to parse specs from the command line first.
    with spack.concretize.disable_compiler_existence_check():
        specs = spack.cmd.parse_specs(args.specs, concretize=True)

        # If there is a file, parse each line as a spec and add it to the list.
        if args.file:
            if specs:
                tty.die("Cannot pass specs on the command line with --file.")
            specs = _read_specs_from_file(args.file)

        # If nothing was passed, use the active environment's specs, or all
        # packages if there is no active environment.
        if not specs:
            env = ev.get_env(args, 'mirror')
            if env:
                specs = env.specs_by_hash.values()
            else:
                specs = [Spec(n) for n in spack.repo.all_package_names()]
                specs.sort(key=lambda s: s.format("{name}{@version}").lower())

        # If the user asked for dependencies, traverse the spec DAG to get them.
        if args.dependencies:
            new_specs = set()
            for spec in specs:
                spec.concretize()
                for s in spec.traverse():
                    new_specs.add(s)
            specs = list(new_specs)

        # Skip external specs, as they are already installed
        external_specs = [s for s in specs if s.external]
        specs = [s for s in specs if not s.external]

        for spec in external_specs:
            msg = 'Skipping {0} as it is an external spec.'
            tty.msg(msg.format(spec.cshort_spec))

        # Default name for directory is spack-mirror-<DATESTAMP>
        directory = args.directory
        if not directory:
            timestamp = datetime.now().strftime("%Y-%m-%d")
            directory = 'spack-mirror-' + timestamp

        # Record whether the mirror directory already existed.
        existed = os.path.isdir(directory)

        # Actually do the work to create the mirror
        present, mirrored, error = spack.mirror.create(
            directory, specs, num_versions=args.versions_per_spec)
        p, m, e = len(present), len(mirrored), len(error)

        verb = "updated" if existed else "created"
        tty.msg(
            "Successfully %s mirror in %s" % (verb, directory),
            "Archive stats:",
            "  %-4d already present"  % p,
            "  %-4d added"            % m,
            "  %-4d failed to fetch." % e)
        if error:
            tty.error("Failed downloads:")
            colify(s.cformat("{name}{@version}") for s in error)
            sys.exit(1)
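
_read_specs_from_file is referenced above but not shown. A plausible minimal sketch of such a helper, assuming one spec per non-empty, non-comment line (this is ours, not necessarily Spack's implementation):

from spack.spec import Spec

def read_specs_sketch(path):
    """Parse one Spec per non-empty, non-comment line of a text file."""
    specs = []
    with open(path) as stream:
        for line in stream:
            line = line.strip()
            if line and not line.startswith('#'):
                specs.append(Spec(line))
    return specs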