Example 1
    def __init__(self, experiment):
        self.experiment = experiment

        self.dir = ''

        self.resources = []
        self.linked_resources = []
        self.env_vars = {}
        self.new_files = []

        self.commands = OrderedDict()

        self.optional_output = []
        self.required_output = []

        self.properties = tools.Properties()
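The fragment above stores the run's commands in an OrderedDict so that they can later be emitted in the order in which they were added. A minimal standalone sketch of that ordering guarantee (the command names and arguments are made up):

from collections import OrderedDict

commands = OrderedDict()
commands['translate'] = (['translator', 'domain.pddl', 'problem.pddl'], {})
commands['search'] = (['planner'], {'stdin': 'output'})

# Iteration preserves insertion order: 'translate' is emitted before 'search'.
for name, (cmd, kwargs) in commands.items():
    print(name)
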
Example 2
    if not valid_module_name(name):
        tty.die("Package name can only contain A-Z, a-z, 0-9, '_' and '-'")

    tty.msg("This looks like a URL for %s version %s." % (name, version))
    tty.msg("Creating template for package %s" % name)

    # Create a directory for the new package.
    pkg_path = spack.db.filename_for_package_name(name)
    if os.path.exists(pkg_path) and not args.force:
        tty.die("%s already exists." % pkg_path)
    else:
        mkdirp(os.path.dirname(pkg_path))

    versions = spack.package.find_versions_of_archive(url)
    rkeys = sorted(versions.keys(), reverse=True)
    versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))

    archives_to_fetch = 1
    if not versions:
        # If the fetch failed for some reason, revert to what the user provided
        versions = {version: url}
    elif len(versions) > 1:
        tty.msg(
            "Found %s versions of %s:" % (len(versions), name),
            *spack.cmd.elide_list(
                ["%-10s%s" % (v, u) for v, u in versions.iteritems()]))
        print
        archives_to_fetch = tty.get_number(
            "Include how many checksums in the package file?",
            default=5,
            abort='q')
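The version handling above sorts the fetched versions in descending order and rebuilds the mapping as an OrderedDict, so that the newest versions come first. A small standalone sketch of the same idiom (plain strings stand in for the real version keys and URLs):

from collections import OrderedDict

versions = {'1.0': 'http://example.com/pkg-1.0.tar.gz',
            '2.1': 'http://example.com/pkg-2.1.tar.gz',
            '1.4': 'http://example.com/pkg-1.4.tar.gz'}

rkeys = sorted(versions.keys(), reverse=True)
versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))

print(list(versions.keys()))  # ['2.1', '1.4', '1.0'] -- newest first
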
Example 3
    if not valid_module_name(name):
        tty.die("Package name can only contain A-Z, a-z, 0-9, '_' and '-'")

    tty.msg("This looks like a URL for %s version %s." % (name, version))
    tty.msg("Creating template for package %s" % name)

    # Create a directory for the new package.
    pkg_path = spack.db.filename_for_package_name(name)
    if os.path.exists(pkg_path) and not args.force:
        tty.die("%s already exists." % pkg_path)
    else:
        mkdirp(os.path.dirname(pkg_path))

    versions = spack.package.find_versions_of_archive(url)
    rkeys = sorted(versions.keys(), reverse=True)
    versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))

    archives_to_fetch = 1
    if not versions:
        # If the fetch failed for some reason, revert to what the user provided
        versions = {version: url}
    elif len(versions) > 1:
        tty.msg(
            "Found %s versions of %s:" % (len(versions), name),
            *spack.cmd.elide_list(["%-10s%s" % (v, u) for v, u in versions.iteritems()])
        )
        print
        archives_to_fetch = tty.get_number("Include how many checksums in the package file?", default=5, abort="q")

        if not archives_to_fetch:
            tty.msg("Aborted.")
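The directory handling above refuses to overwrite an existing package file unless --force was given, and otherwise creates the missing parent directories. A standalone sketch of the same pattern (the function name and path handling are made up; plain os/sys calls stand in for Spack's tty.die and mkdirp helpers):

import os
import sys

def prepare_package_file(path, force=False):
    # Refuse to clobber an existing file unless the caller forces it.
    if os.path.exists(path) and not force:
        sys.exit("%s already exists." % path)
    # Create the parent directory if it does not exist yet (mkdirp-style).
    parent = os.path.dirname(path)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
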
Example 4
class Run(object):
    """
    A Task can consist of one or more Runs.
    """
    def __init__(self, experiment):
        self.experiment = experiment

        self.dir = ''

        self.resources = []
        self.linked_resources = []
        self.env_vars = {}
        self.new_files = []

        self.commands = OrderedDict()

        self.optional_output = []
        self.required_output = []

        self.properties = tools.Properties()

    def set_property(self, name, value):
        """
        Add a key-value property to the run. These properties can be used
        later for evaluation.

        Example:
        >>> run.set_property('domain', 'gripper')
        """
        self.properties[name] = value

    def require_resource(self, resource_name):
        """
        Some resources can be used by linking to the resource in the
        experiment directory without copying it into each run.

        On the argo cluster, however, requiring a resource implies copying it
        into the task directory.

        Example:
        >>> run.require_resource('PLANNER')

        This makes the planner resource available for this run. In
        environments like the argo cluster, this implies copying the planner
        into each task; on the gkigrid we merely need to set up the PLANNER
        environment variable.
        """
        self.linked_resources.append(resource_name)

    def add_resource(self, resource_name, source, dest, required=True,
                     symlink=False):
        """
        Example:
        >>> run.add_resource('DOMAIN', '../benchmarks/gripper/domain.pddl',
                             'domain.pddl')

        Copy "../benchmarks/gripper/domain.pddl" into the run
        directory under name "domain.pddl" and make it available as
        resource "DOMAIN" (usable as environment variable $DOMAIN).
        """
        self.resources.append((source, dest, required, symlink))
        self.env_vars[resource_name] = dest

    def add_command(self, name, command, **kwargs):
        """Adds a command to the run.

        "name" is the command's name.
        "command" has to be a list of strings.

        The items in kwargs are passed to the calls.call.Call() class. You can
        find the valid keys there.

        kwargs can also contain a value for "abort_on_failure" which makes the
        run abort if the command does not return 0.

        The remaining items in kwargs are passed to subprocess.Popen().
        The allowed parameters can be found at
        http://docs.python.org/library/subprocess.html

        Examples:
        >>> run.add_command('translate', [run.translator.shell_name,
                                          'domain.pddl', 'problem.pddl'])
        >>> run.add_command('preprocess', [run.preprocessor.shell_name],
                            stdin='output.sas')
        >>> run.add_command('validate', ['VALIDATE', 'DOMAIN', 'PROBLEM',
                                         'sas_plan'])

        """
        assert type(name) is str, 'The command name must be a string'
        assert type(command) in (tuple, list), 'The command must be a list'
        name = name.replace(' ', '_')
        self.commands[name] = (command, kwargs)

    def declare_optional_output(self, file_glob):
        """
        Example:
        >>> run.declare_optional_output('plan.soln*')

        Specifies that all files matching "plan.soln*" (a shell-style
        glob pattern) are part of the experiment output.
        """
        self.optional_output.append(file_glob)

    def declare_required_output(self, filename):
        """
        Declare an output file that must be present at the end of the run;
        otherwise it is an error. Such a specification is necessary e.g. for
        the Argo cluster. On the gkigrid this does nothing, although the
        declared outputs should be stored somewhere so that we can later
        verify that everything went according to plan.
        """
        self.required_output.append(filename)

    def build(self):
        """
        After all the necessary adjustments have been made with the methods
        above, this method writes everything to disk.
        """
        assert self.dir

        tools.overwrite_dir(self.dir)
        # We need to build the linked resources before the run script.
        # Only then are all resources (including linked ones) present in
        # self.resources.
        self._build_linked_resources()
        self._build_run_script()
        self._build_resources()
        self._build_properties_file()

    def _build_run_script(self):
        if not self.commands:
            msg = 'Please add at least one command via run.add_command()'
            raise SystemExit(msg)

        self.experiment.env_vars.update(self.env_vars)
        self.env_vars = self.experiment.env_vars.copy()

        with open(os.path.join(tools.DATA_DIR, 'run-template.py')) as template_file:
            run_script = template_file.read()

        def make_call(name, cmd, kwargs):
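            # Build the Python source snippet that the generated run script
            # executes for this command.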
            abort_on_failure = kwargs.pop('abort_on_failure',
                                          DEFAULT_ABORT_ON_FAILURE)
            if not type(cmd) is list:
                logging.error('Commands have to be lists of strings. '
                              'The command <%s> is not a list.' % cmd)
                sys.exit(1)
            if not cmd:
                logging.error('Command "%s" cannot be empty' % name)
                sys.exit(1)

            # Support running globally installed binaries
            def format_arg(arg):
                return arg if arg in self.env_vars else '"%s"' % arg

            def format_key_value_pair(key, val):
                return '%s=%s' % (key, val if val in self.env_vars else repr(val))

            cmd_string = '[%s]' % ', '.join([format_arg(arg) for arg in cmd])
            kwargs_string = ', '.join(format_key_value_pair(key, value)
                                      for key, value in kwargs.items())
            parts = [cmd_string]
            if kwargs_string:
                parts.append(kwargs_string)
            call = ('retcode = Call(%s, **redirects).wait()\n'
                    'save_returncode("%s", retcode)\n') % (', '.join(parts), name)
            if abort_on_failure:
                call += ('if not retcode == 0:\n'
                         '    print_(driver_log, "%s returned %%s" %% retcode)\n'
                         '    sys.exit(1)\n' % name)
            return call

        calls_text = '\n'.join(make_call(name, cmd, kwargs)
                               for name, (cmd, kwargs) in self.commands.items())

        if self.env_vars:
            env_vars_text = ''
            for var, filename in sorted(self.env_vars.items()):
                abs_filename = self._get_abs_path(filename)
                rel_filename = self._get_rel_path(abs_filename)
                env_vars_text += ('%s = "%s"\n' % (var, rel_filename))
        else:
            env_vars_text = '"Here you would find variable declarations"'

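        # Substitute the """VARIABLES""" and """CALLS""" placeholders in the
        # template with the generated variable declarations and command calls.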
        for old, new in [('VARIABLES', env_vars_text), ('CALLS', calls_text)]:
            run_script = run_script.replace('"""%s"""' % old, new)

        self.new_files.append(('run', run_script))

    def _build_linked_resources(self):
        """
        If we are building an argo experiment, add all linked resources to
        the resources list
        """
        self.experiment.environment.build_linked_resources(self)

    def _build_resources(self):
        for name, content in self.new_files:
            filename = self._get_abs_path(name)
            with open(filename, 'w') as file:
                logging.debug('Writing file "%s"' % filename)
                file.write(content)
                if name == 'run':
                    # Make run script executable
                    os.chmod(filename, 0o755)

        for source, dest, required, symlink in self.resources:
            if required and not os.path.exists(source):
                logging.error('The required resource cannot be found: %s' %
                              source)
                sys.exit(1)
            dest = self._get_abs_path(dest)
            if symlink:
                source = self._get_rel_path(source)
                os.symlink(source, dest)
                logging.debug('Linking from %s to %s' % (source, dest))
                continue

            logging.debug('Copying %s to %s' % (source, dest))
            tools.copy(source, dest, required)

    def _build_properties_file(self):
        # Check correctness of id property
        run_id = self.properties.get('id')
        if run_id is None:
            logging.error('Each run must have an id')
            sys.exit(1)
        if not type(run_id) is list:
            logging.error('id must be a list, but %s is not' % run_id)
            sys.exit(1)
        self.properties['id'] = [str(item) for item in run_id]

        self.properties.filename = self._get_abs_path('properties')
        self.properties.write()

    def _get_abs_path(self, rel_path):
        """
        Example:
        >>> run._get_abs_path('run')
        '/home/user/mytestjob/runs-00001-00100/run'
        """
        return os.path.join(self.dir, rel_path)

    def _get_rel_path(self, abs_path):
        return os.path.relpath(abs_path, start=self.dir)
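Pieced together from the docstring examples above, a hypothetical usage of the Run class might look as follows; the experiment object, the run directory and the file paths are assumptions, not taken from the source:

run = Run(experiment)
run.dir = 'runs-00001-00100/00001'

# Properties are written to the 'properties' file by build();
# the 'id' property is mandatory and must be a list.
run.set_property('id', ['gripper', 'prob01'])
run.set_property('domain', 'gripper')

run.require_resource('PLANNER')
run.add_resource('DOMAIN', '../benchmarks/gripper/domain.pddl', 'domain.pddl')

run.add_command('validate', ['VALIDATE', 'DOMAIN', 'PROBLEM', 'sas_plan'])
run.declare_optional_output('plan.soln*')

run.build()  # writes the run script, resources and properties file to run.dir
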
Example 5
import os

from external.ordereddict import OrderedDict
from llnl.util.lang import memoized
import spack.error

__all__ = [
    'SpackConfigParser', 'get_config', 'SpackConfigurationError',
    'InvalidConfigurationScopeError', 'InvalidSectionNameError',
    'ReadOnlySpackConfigError', 'ConfigParserError', 'NoOptionError',
    'NoSectionError']

_named_section_re = r'([^ ]+) "([^"]+)"'

"""Names of scopes and their corresponding configuration files."""
_scopes = OrderedDict([
    # A list of pairs keeps the scope order deterministic.
    ('site', os.path.join(spack.etc_path, 'spackconfig')),
    ('user', os.path.expanduser('~/.spackconfig')),
])

_field_regex = r'^([\w-]*)'        \
               r'(?:\.(.*(?=.)))?' \
               r'(?:\.([\w-]+))?$'

_section_regex = r'^([\w-]*)\s*' \
                 r'\"([^"]*)\"$'
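
# Illustration (not part of the original module): how the regexes above are
# intended to match a named section header. The section name and quoted value
# below are made up.
#
#   >>> re.match(_named_section_re, 'mirror "remote"').groups()
#   ('mirror', 'remote')
#   >>> re.match(_section_regex, 'mirror "remote"').groups()
#   ('mirror', 'remote')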


# Cache of configs -- we memoize this for performance.
_config = {}

def get_config(scope=None, **kwargs):
    """Get a Spack configuration object, which can be used to set options.