Example #1
def add_include_entry(env, inc, prepend=True):
    include = config_dict(env.yaml).get('include', [])
    if len(include) == 0:
        # the 'include' section is missing; add it
        env.yaml['spack'].insert(0, 'include', [])
        include = config_dict(env.yaml).get('include', [])
    if prepend:
        include.insert(0, inc)
    else:
        include.append(inc)
    env.write()
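
Every example on this page hinges on `config_dict` (also exposed as `ev.config_dict` from `spack.environment`): it pulls the configuration section out of an environment manifest. A minimal sketch of its behavior, assuming manifests open with one of the two top-level keys the environment schema accepts:

def config_dict_sketch(yaml_data):
    # A spack.yaml/spack.lock mapping opens with either 'spack' or 'env';
    # the real implementation resolves the key via the environment schema.
    for key in ('spack', 'env'):
        if key in yaml_data:
            return yaml_data[key]
    raise KeyError("not an environment manifest: no 'spack' or 'env' key")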
Example #2
def add_from_file(filename, scope=None):
    """Add updates to a config from a filename
    """
    import spack.environment as ev

    # Get file as config dict
    data = read_config_file(filename)
    if any(k in data for k in spack.schema.env.keys):
        data = ev.config_dict(data)

    # update all sections from config dict
    # We have to iterate on keys to keep overrides from the file
    for section in data.keys():
        if section in section_schemas.keys():
            # Special handling for compiler scope difference
            # Has to be handled after we choose a section
            if scope is None:
                scope = default_modify_scope(section)

            value = data[section]
            existing = get(section, scope=scope)
            new = merge_yaml(existing, value)

            # We cannot call config.set directly (set is a type)
            config.set(section, new, scope)
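
A hedged usage sketch for the function above, assuming it is the module-level `spack.config.add_from_file` (its unqualified helpers such as `section_schemas` and `merge_yaml` suggest it lives in spack/config.py); the file name and contents are illustrative:

import spack.config

# Illustrative config fragment to merge in
with open('extra.yaml', 'w') as f:
    f.write('packages:\n  zlib:\n    variants: +shared\n')

# Merge the file's 'packages' section into the 'user' scope
spack.config.add_from_file('extra.yaml', scope='user')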
Example #3
File: __init__.py Project: wangvsa/spack
def create(configuration):
    """Returns a writer that conforms to the configuration passed as input.

    Args:
        configuration: how to generate the current recipe
    """
    name = ev.config_dict(configuration)['container']['format']
    return _writer_factory[name](configuration)
Example #4
def create(configuration, last_phase=None):
    """Returns a writer that conforms to the configuration passed as input.

    Args:
        configuration (dict): how to generate the current recipe
        last_phase (str): last phase to be printed or None to print them all
    """
    name = ev.config_dict(configuration)['container']['format']
    return _writer_factory[name](configuration, last_phase)
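
Both `create` variants dispatch on the `container:format` value through a module-level `_writer_factory` mapping. A minimal sketch of how such a registry is commonly populated; the decorator and writer class below are illustrative, not Spack's actual API:

_writer_factory = {}

def writer(name):
    # Hypothetical registration decorator: map a format name to a class
    def _decorator(cls):
        _writer_factory[name] = cls
        return cls
    return _decorator

@writer('docker')
class DockerWriter:
    def __init__(self, configuration, last_phase=None):
        self.configuration = configuration
        self.last_phase = last_phase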
Example #5
File: ci.py Project: cgmb/rocm-spack
def ci_reindex(args):
    """Rebuild the buildcache index associated with the mirror in the
       active, gitlab-enabled environment. """
    env = ev.get_env(args, 'ci rebuild-index', required=True)
    yaml_root = ev.config_dict(env.yaml)

    if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
        tty.die('spack ci rebuild-index requires an env containing a mirror')

    ci_mirrors = yaml_root['mirrors']
    mirror_urls = [url for url in ci_mirrors.values()]
    remote_mirror_url = mirror_urls[0]

    buildcache.update_index(remote_mirror_url, update_keys=True)
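
For reference, the 'mirrors' mapping read above comes straight from the environment manifest; a sketch of the minimal shape the guard accepts (mirror name and URL are hypothetical):

yaml_root = {
    'mirrors': {
        'ci-mirror': 's3://example-bucket/mirror',  # hypothetical URL
    },
}
# 'mirrors' maps names to URLs; 'ci rebuild-index' simply takes the first
remote_mirror_url = list(yaml_root['mirrors'].values())[0]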
Example #6
    def __init__(self, config, last_phase):
        self.config = ev.config_dict(config)
        self.container_config = self.config['container']

        # Operating system tag as written in the configuration file
        self.operating_system_key = self.container_config['images'].get('os')
        # Get base images and verify the OS
        bootstrap, build, final = _stage_base_images(
            self.container_config['images'])
        self.bootstrap_image = bootstrap
        self.build_image = build
        self.final_image = final

        # Record the last phase
        self.last_phase = last_phase
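
The constructor assumes the manifest carries a 'container' section. Using the docker defaults that `validate` fills in (see Example #11 below), the input might look like this; the spec list is illustrative:

configuration = {
    'spack': {
        'specs': ['zlib'],  # illustrative
        'container': {
            'format': 'docker',
            'images': {'os': 'ubuntu:18.04', 'spack': 'develop'},
        },
    },
}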
Example #7
File: develop.py Project: eic/spack
    def check_develop(self, env, spec, path=None):
        path = path or spec.name

        # check in memory representation
        assert spec.name in env.dev_specs
        dev_specs_entry = env.dev_specs[spec.name]
        assert dev_specs_entry['path'] == path
        assert dev_specs_entry['spec'] == str(spec)

        # check yaml representation
        yaml = ev.config_dict(env.yaml)
        assert spec.name in yaml['develop']
        yaml_entry = yaml['develop'][spec.name]
        assert yaml_entry['spec'] == str(spec)
        if path == spec.name:
            # default paths aren't written out
            assert 'path' not in yaml_entry
        else:
            assert yaml_entry['path'] == path
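
For orientation, the 'develop' section the assertions above walk has roughly this shape (package name, spec, and path are illustrative):

yaml = {
    'develop': {
        'mypkg': {
            'spec': 'mypkg@main',    # stringified spec
            'path': '/custom/path',  # omitted when it equals the package name
        },
    },
}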
Example #8
def evaluate_external(tmpdir, yaml_file):
    ext_path = setupExternalEnv(tmpdir)
    manifest = tmpdir.join('spack.yaml')

    with open(str(manifest), 'w') as f:
        f.write(yaml_file)

    env('create', '-d', 'test', 'spack.yaml')
    assert os.path.isfile('test/spack.yaml')

    with ev.Environment('test') as e:
        args = ParserMock(ext_path, merge=True)
        with patch("manager_cmds.external.write_spec",
                   return_value=str(
                       ExtPackage(
                           'cmake', '[email protected]', '/path/top/some/view')
                   )) as mock_write:
            manager_cmds.external.external(None, args)
        # check that the include entry was added to the spack.yaml
        # called_once() merely returns a truthy mock; assert_called_once()
        # performs the actual check
        mock_write.assert_called_once()
        includes = config_dict(e.yaml).get('include', [])
        assert 1 == len(includes)
        assert 'externals.yaml' == str(includes[0])
Example #9
File: ci.py Project: cgmb/rocm-spack
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally. """
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(ci_project_dir,
                                          pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what our temporary storage mirror is: Is it artifacts
    # buildcache?  Or temporary-storage-url-prefix?  In some cases we need to
    # force something or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(temp_storage_url_prefix,
                                            ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror
                or (spack_is_pr_pipeline and not enable_artifacts_mirror
                    and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(env, root_spec, job_spec_pkg_name,
                                           related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To provide logs, cdash reports, etc for developer download/perusal,
    # these things have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too.  So here we
    # need to clean out the artifacts we may have got from upstream jobs.

    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [concrete_env_dir, pipeline_artifacts_dir]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory; it will
    # also be used by the generated "spack install" command to install the spec
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url, cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(job_spec,
                                           full_hash_match=True,
                                           index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))
        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                 False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild spec

    # Start with spack arguments
    install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id, cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project, cdash_site,
            job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url',
                cdash_upload_url,
                '--cdash-build',
                cdash_build_name,
                '--cdash-site',
                cdash_site,
                '--cdash-buildstamp',
                cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)
        # Fall back to a generic failure code so install_exit_code is
        # always defined when it is logged and returned below
        install_exit_code = 1

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            broken_spec_details = {
                'broken-spec': {
                    'job-url': get_env_var('CI_JOB_URL'),
                    'pipeline-url': get_env_var('CI_PIPELINE_URL'),
                    'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
                }
            }

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write(syaml.dump(broken_spec_details))
                web_util.push_to_url(empty_file_path,
                                     broken_spec_path,
                                     keep_original=False,
                                     extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # For S3 errors (access denied or other connection issues),
                # the first non-boto-specific class in the exception
                # hierarchy is Exception, so just print a warning and move on
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # Since we generated the "spack install ..." command with "--keep-stage",
    # copy any logs from the staging directory to artifacts now
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create buildcache on remote mirror, either on pr-specific mirror or
    # on the main mirror defined in the gitlab-enabled spack environment
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          buildcache_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 pipeline_mirror_url)

        # If this is a develop pipeline, check if the spec that we just built is
        # on the broken-specs list. If so, remove it.
        if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            just_built_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
            if web_util.url_exists(broken_spec_path):
                tty.msg('Removing {0} from the list of broken specs'.format(
                    broken_spec_path))
                try:
                    web_util.remove_url(broken_spec_path)
                except Exception as err:
                    # For S3 errors (access denied or other connection
                    # issues), the first non-boto-specific class in the
                    # exception hierarchy is Exception, so just warn and move on
                    msg = 'Error removing {0} from broken specs list: {1}'.format(
                        broken_spec_path, err)
                    tty.warn(msg)

    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # Control characters cause this to be printed in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code
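
Example #9 leans heavily on a small `get_env_var` helper; a plausible minimal implementation (a sketch of the expected behavior, not necessarily Spack's exact code):

import os

def get_env_var(variable_name):
    # Return the CI/pipeline variable if set, otherwise None
    return os.environ.get(variable_name)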
Example #10
def include_entry_exists(env, name):
    includes = config_dict(env.yaml).get('include', [])
    for entry in includes:
        if entry == name:
            return True
    return False
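
Combined with `add_include_entry` from Example #1, this yields the usual idempotent-include pattern; `env` is assumed to be an active environment object:

if not include_entry_exists(env, 'externals.yaml'):
    add_include_entry(env, 'externals.yaml')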
Example #11
def validate(configuration_file):
    """Validate a Spack environment YAML file that is being used to generate a
    recipe for a container.

    Since a few attributes of the configuration must have specific values for
    the container recipe, this function returns a sanitized copy of the
    configuration in the input file. If any modification is needed, a warning
    will be issued.

    Args:
        configuration_file (str): path to the Spack environment YAML file

    Returns:
        A sanitized copy of the configuration stored in the input file
    """
    import jsonschema
    with open(configuration_file) as f:
        config = syaml.load(f)

    # Ensure we have a "container" attribute with sensible defaults set
    env_dict = ev.config_dict(config)
    env_dict.setdefault('container', {
        'format': 'docker',
        'images': {
            'os': 'ubuntu:18.04',
            'spack': 'develop'
        }
    })
    env_dict['container'].setdefault('format', 'docker')
    env_dict['container'].setdefault('images', {
        'os': 'ubuntu:18.04',
        'spack': 'develop'
    })

    # Remove attributes that are not needed / allowed in the
    # container recipe
    for subsection in ('cdash', 'gitlab_ci', 'modules'):
        if subsection in env_dict:
            msg = ('the subsection "{0}" in "{1}" is not used when generating'
                   ' container recipes and will be discarded')
            warnings.warn(msg.format(subsection, configuration_file))
            env_dict.pop(subsection)

    # Set the default value of the concretization strategy to "together" and
    # warn if the user explicitly set another value
    env_dict.setdefault('concretization', 'together')
    if env_dict['concretization'] != 'together':
        msg = ('the "concretization" attribute of the environment is set '
               'to "{0}" [the advised value is instead "together"]')
        warnings.warn(msg.format(env_dict['concretization']))

    # Check if the install tree was explicitly set to a custom value and warn
    # that it will be overridden
    environment_config = env_dict.get('config', {})
    if environment_config.get('install_tree', None):
        msg = ('the "config:install_tree" attribute has been set explicitly '
               'and will be overridden in the container image')
        warnings.warn(msg)

    # Likewise for the view
    environment_view = env_dict.get('view', None)
    if environment_view:
        msg = ('the "view" attribute has been set explicitly '
               'and will be overridden in the container image')
        warnings.warn(msg)

    jsonschema.validate(config, schema=env.schema)
    return config
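
A hedged end-to-end sketch tying `validate` back to the `create` factory from Example #3; the file name is illustrative, and the final rendering call is an assumption about the writer's interface:

config = validate('spack.yaml')  # sanitized copy; warnings issued as needed
writer = create(config)          # picks the writer for container:format
recipe = writer()                # hypothetical call to render the recipe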
Example #12
def generate_gitlab_ci_yaml(env,
                            print_summary,
                            output_file,
                            custom_spack_repo=None,
                            custom_spack_ref=None):
    # FIXME: What's the difference between one that opens with 'spack'
    # and one that opens with 'env'?  This will only handle the former.
    with spack.concretize.disable_compiler_existence_check():
        env.concretize()

    yaml_root = ev.config_dict(env.yaml)

    if 'gitlab-ci' not in yaml_root:
        tty.die('Environment yaml does not have "gitlab-ci" section')

    gitlab_ci = yaml_root['gitlab-ci']
    ci_mappings = gitlab_ci['mappings']

    final_job_config = None
    if 'final-stage-rebuild-index' in gitlab_ci:
        final_job_config = gitlab_ci['final-stage-rebuild-index']

    build_group = None
    enable_cdash_reporting = False
    cdash_auth_token = None

    if 'cdash' in yaml_root:
        enable_cdash_reporting = True
        ci_cdash = yaml_root['cdash']
        build_group = ci_cdash['build-group']
        cdash_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        cdash_site = ci_cdash['site']

        if 'SPACK_CDASH_AUTH_TOKEN' in os.environ:
            tty.verbose("Using CDash auth token from environment")
            cdash_auth_token = os.environ.get('SPACK_CDASH_AUTH_TOKEN')

    # Make sure we use a custom spack if necessary
    before_script = None
    after_script = None
    if custom_spack_repo:
        if not custom_spack_ref:
            custom_spack_ref = 'master'
        before_script = [
            ('git clone "{0}" --branch "{1}" --depth 1 '
             '--single-branch'.format(custom_spack_repo, custom_spack_ref)),
            # Next line just shows spack version in pipeline output
            'pushd ./spack && git rev-parse HEAD && popd',
            '. "./spack/share/spack/setup-env.sh"',
        ]
        after_script = ['rm -rf "./spack"']

    ci_mirrors = yaml_root['mirrors']
    mirror_urls = [url for url in ci_mirrors.values()]

    enable_artifacts_buildcache = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_buildcache = gitlab_ci['enable-artifacts-buildcache']

    bootstrap_specs = []
    phases = []
    if 'bootstrap' in gitlab_ci:
        for phase in gitlab_ci['bootstrap']:
            try:
                phase_name = phase.get('name')
                strip_compilers = phase.get('compiler-agnostic')
            except AttributeError:
                phase_name = phase
                strip_compilers = False
            phases.append({
                'name': phase_name,
                'strip-compilers': strip_compilers,
            })

            for bs in env.spec_lists[phase_name]:
                bootstrap_specs.append({
                    'spec': bs,
                    'phase-name': phase_name,
                    'strip-compilers': strip_compilers,
                })

    phases.append({
        'name': 'specs',
        'strip-compilers': False,
    })

    staged_phases = {}
    for phase in phases:
        phase_name = phase['name']
        with spack.concretize.disable_compiler_existence_check():
            staged_phases[phase_name] = stage_spec_jobs(
                env.spec_lists[phase_name])

    if print_summary:
        for phase in phases:
            phase_name = phase['name']
            tty.msg('Stages for phase "{0}"'.format(phase_name))
            phase_stages = staged_phases[phase_name]
            print_staging_summary(*phase_stages)

    all_job_names = []
    output_object = {}
    job_id = 0
    stage_id = 0

    stage_names = []

    for phase in phases:
        phase_name = phase['name']
        strip_compilers = phase['strip-compilers']

        main_phase = is_main_phase(phase_name)
        spec_labels, dependencies, stages = staged_phases[phase_name]

        for stage_jobs in stages:
            stage_name = 'stage-{0}'.format(stage_id)
            stage_names.append(stage_name)
            stage_id += 1

            for spec_label in stage_jobs:
                root_spec = spec_labels[spec_label]['rootSpec']
                pkg_name = pkg_name_from_spec_label(spec_label)
                release_spec = root_spec[pkg_name]

                runner_attribs = find_matching_config(release_spec,
                                                      ci_mappings)

                if not runner_attribs:
                    tty.warn('No match found for {0}, skipping it'.format(
                        release_spec))
                    continue

                tags = [tag for tag in runner_attribs['tags']]

                variables = {}
                if 'variables' in runner_attribs:
                    variables.update(runner_attribs['variables'])

                image_name = None
                image_entry = None
                if 'image' in runner_attribs:
                    build_image = runner_attribs['image']
                    try:
                        image_name = build_image.get('name')
                        entrypoint = build_image.get('entrypoint')
                        image_entry = [p for p in entrypoint]
                    except AttributeError:
                        image_name = build_image

                osname = str(release_spec.architecture)
                job_name = get_job_name(phase_name, strip_compilers,
                                        release_spec, osname, build_group)

                debug_flag = ''
                if 'enable-debug-messages' in gitlab_ci:
                    debug_flag = '-d '

                job_scripts = ['spack {0}ci rebuild'.format(debug_flag)]

                compiler_action = 'NONE'
                if len(phases) > 1:
                    compiler_action = 'FIND_ANY'
                    if is_main_phase(phase_name):
                        compiler_action = 'INSTALL_MISSING'

                job_vars = {
                    'SPACK_ROOT_SPEC':
                    format_root_spec(root_spec, main_phase, strip_compilers),
                    'SPACK_JOB_SPEC_PKG_NAME':
                    release_spec.name,
                    'SPACK_COMPILER_ACTION':
                    compiler_action,
                }

                job_dependencies = []
                if spec_label in dependencies:
                    for dep_label in dependencies[spec_label]:
                        dep_pkg = pkg_name_from_spec_label(dep_label)
                        dep_spec = spec_labels[dep_label]['rootSpec'][dep_pkg]
                        dep_job_name = get_job_name(phase_name,
                                                    strip_compilers, dep_spec,
                                                    osname, build_group)
                        job_dependencies.append(dep_job_name)

                # This next section helps gitlab make sure the right
                # bootstrapped compiler exists in the artifacts buildcache by
                # creating an artificial dependency between this spec and its
                # compiler.  So, if we are in the main phase, and if the
                # compiler we are supposed to use is listed in any of the
                # bootstrap spec lists, then we will add one more dependency to
                # "job_dependencies" (that compiler).
                if is_main_phase(phase_name):
                    compiler_pkg_spec = compilers.pkg_spec_for_compiler(
                        release_spec.compiler)
                    for bs in bootstrap_specs:
                        bs_arch = bs['spec'].architecture
                        if (bs['spec'].satisfies(compiler_pkg_spec)
                                and bs_arch == release_spec.architecture):
                            c_job_name = get_job_name(bs['phase-name'],
                                                      bs['strip-compilers'],
                                                      bs['spec'], str(bs_arch),
                                                      build_group)
                            job_dependencies.append(c_job_name)

                if enable_cdash_reporting:
                    cdash_build_name = get_cdash_build_name(
                        release_spec, build_group)
                    all_job_names.append(cdash_build_name)

                    related_builds = []  # Used for relating CDash builds
                    if spec_label in dependencies:
                        related_builds = ([
                            spec_labels[d]['spec'].name
                            for d in dependencies[spec_label]
                        ])

                    job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
                    job_vars['SPACK_RELATED_BUILDS_CDASH'] = ';'.join(
                        related_builds)

                variables.update(job_vars)

                artifact_paths = [
                    'jobs_scratch_dir',
                    'cdash_report',
                ]

                if enable_artifacts_buildcache:
                    artifact_paths.append('local_mirror/build_cache')

                job_object = {
                    'stage': stage_name,
                    'variables': variables,
                    'script': job_scripts,
                    'tags': tags,
                    'artifacts': {
                        'paths': artifact_paths,
                        'when': 'always',
                    },
                    'dependencies': job_dependencies,
                }

                if before_script:
                    job_object['before_script'] = before_script

                if after_script:
                    job_object['after_script'] = after_script

                if image_name:
                    job_object['image'] = image_name
                    if image_entry is not None:
                        job_object['image'] = {
                            'name': image_name,
                            'entrypoint': image_entry,
                        }

                output_object[job_name] = job_object
                job_id += 1

    tty.debug('{0} build jobs generated in {1} stages'.format(
        job_id, stage_id))

    # Use "all_job_names" to populate the build group for this set
    if enable_cdash_reporting and cdash_auth_token:
        try:
            populate_buildgroup(all_job_names, build_group, cdash_project,
                                cdash_site, cdash_auth_token, cdash_url)
        except (SpackError, HTTPError, URLError) as err:
            tty.warn('Problem populating buildgroup: {0}'.format(err))
    else:
        tty.warn('Unable to populate buildgroup without CDash credentials')

    if final_job_config:
        # Add an extra, final job to regenerate the index
        final_stage = 'stage-rebuild-index'
        final_job = {
            'stage': final_stage,
            'script':
            'spack buildcache update-index -d {0}'.format(mirror_urls[0]),
            'tags': final_job_config['tags']
        }
        if 'image' in final_job_config:
            final_job['image'] = final_job_config['image']
        if before_script:
            final_job['before_script'] = before_script
        if after_script:
            final_job['after_script'] = after_script
        output_object['rebuild-index'] = final_job
        stage_names.append(final_stage)

    output_object['stages'] = stage_names

    with open(output_file, 'w') as outf:
        outf.write(syaml.dump_config(output_object, default_flow_style=True))
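
For orientation, the object serialized above has roughly this top-level shape (a sketch reconstructed from the code, not verbatim output):

# {
#     '<job name>': {
#         'stage': 'stage-0',
#         'variables': {...}, 'script': [...], 'tags': [...],
#         'artifacts': {'paths': [...], 'when': 'always'},
#         'dependencies': [...],
#     },
#     'rebuild-index': {...},  # only when final-stage-rebuild-index is set
#     'stages': ['stage-0', ..., 'stage-rebuild-index'],
# }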
Example #13
def config_add(args):
    """Add the given configuration to the specified config scope

    This is a stateful operation that edits the config files."""
    if not (args.file or args.path):
        tty.error("No changes requested. Specify a file or value.")
        setup_parser.add_parser.print_help()
        exit(1)

    scope, section = _get_scope_and_section(args)

    # Updates from file
    if args.file:
        # Get file as config dict
        data = spack.config.read_config_file(args.file)
        if any(k in data for k in spack.schema.env.keys):
            data = ev.config_dict(data)

        # update all sections from config dict
        # We have to iterate on keys to keep overrides from the file
        for section in data.keys():
            if section in spack.config.section_schemas.keys():
                # Special handling for compiler scope difference
                # Has to be handled after we choose a section
                if scope is None:
                    scope = spack.config.default_modify_scope(section)

                value = data[section]
                existing = spack.config.get(section, scope=scope)
                new = spack.config.merge_yaml(existing, value)

                set_config(args, section, new, scope)

    if args.path:
        components = spack.config.process_config_path(args.path)

        has_existing_value = True
        path = ''
        override = False
        for idx, name in enumerate(components[:-1]):
            # First handle double colons in constructing path
            colon = '::' if override else ':' if path else ''
            path += colon + name
            if getattr(name, 'override', False):
                override = True
            else:
                override = False

            # Test whether there is an existing value at this level
            existing = spack.config.get(path, scope=scope)

            if existing is None:
                has_existing_value = False
                # We've nested further than existing config, so we need the
                # type information for validation to know how to handle bare
                # values appended to lists.
                existing = spack.config.get_valid_type(path)

                # construct value from this point down
                value = syaml.load_config(components[-1])
                for component in reversed(components[idx + 1:-1]):
                    value = {component: value}
                break

        if has_existing_value:
            path, _, value = args.path.rpartition(':')
            value = syaml.load_config(value)
            existing = spack.config.get(path, scope=scope)

        # append values to lists
        if isinstance(existing, list) and not isinstance(value, list):
            value = [value]

        # merge value into existing
        new = spack.config.merge_yaml(existing, value)
        set_config(args, path, new, scope)
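
The two branches above correspond to the two ways the command is invoked; illustrative shell invocations (paths and values are hypothetical):

# args.file branch: merge every recognized section from a file
#     spack config add -f extra.yaml
# args.path branch: colon-separated path ('::' forces an override)
#     spack config add config:install_tree:/opt/spack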
Example #14
def generate_gitlab_ci_yaml(env,
                            print_summary,
                            output_file,
                            prune_dag=False,
                            check_index_only=False,
                            run_optimizer=False,
                            use_dependencies=False):
    # FIXME: What's the difference between one that opens with 'spack'
    # and one that opens with 'env'?  This will only handle the former.
    with spack.concretize.disable_compiler_existence_check():
        env.concretize()

    yaml_root = ev.config_dict(env.yaml)

    if 'gitlab-ci' not in yaml_root:
        tty.die('Environment yaml does not have "gitlab-ci" section')

    gitlab_ci = yaml_root['gitlab-ci']

    build_group = None
    enable_cdash_reporting = False
    cdash_auth_token = None

    if 'cdash' in yaml_root:
        enable_cdash_reporting = True
        ci_cdash = yaml_root['cdash']
        build_group = ci_cdash['build-group']
        cdash_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        cdash_site = ci_cdash['site']

        if 'SPACK_CDASH_AUTH_TOKEN' in os.environ:
            tty.verbose("Using CDash auth token from environment")
            cdash_auth_token = os.environ.get('SPACK_CDASH_AUTH_TOKEN')

    is_pr_pipeline = (os.environ.get('SPACK_IS_PR_PIPELINE',
                                     '').lower() == 'true')

    spack_pr_branch = os.environ.get('SPACK_PR_BRANCH', None)
    pr_mirror_url = None
    if spack_pr_branch:
        pr_mirror_url = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
                                      spack_pr_branch)

    if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
        tty.die('spack ci generate requires an env containing a mirror')

    ci_mirrors = yaml_root['mirrors']
    mirror_urls = [url for url in ci_mirrors.values()]

    enable_artifacts_buildcache = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_buildcache = gitlab_ci['enable-artifacts-buildcache']

    rebuild_index_enabled = True
    if 'rebuild-index' in gitlab_ci and gitlab_ci['rebuild-index'] is False:
        rebuild_index_enabled = False

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']

    bootstrap_specs = []
    phases = []
    if 'bootstrap' in gitlab_ci:
        for phase in gitlab_ci['bootstrap']:
            try:
                phase_name = phase.get('name')
                strip_compilers = phase.get('compiler-agnostic')
            except AttributeError:
                phase_name = phase
                strip_compilers = False
            phases.append({
                'name': phase_name,
                'strip-compilers': strip_compilers,
            })

            for bs in env.spec_lists[phase_name]:
                bootstrap_specs.append({
                    'spec': bs,
                    'phase-name': phase_name,
                    'strip-compilers': strip_compilers,
                })

    phases.append({
        'name': 'specs',
        'strip-compilers': False,
    })

    # Add this mirror if it's enabled, as some specs might be up to date
    # here and thus not need to be rebuilt.
    if pr_mirror_url:
        add_pr_mirror(pr_mirror_url)

    # Speed up staging by first fetching binary indices from all mirrors
    # (including the per-PR mirror we may have just added above).
    bindist.binary_index.update()

    staged_phases = {}
    try:
        for phase in phases:
            phase_name = phase['name']
            with spack.concretize.disable_compiler_existence_check():
                staged_phases[phase_name] = stage_spec_jobs(
                    env.spec_lists[phase_name],
                    check_index_only=check_index_only)
    finally:
        # Clean up PR mirror if enabled
        if pr_mirror_url:
            remove_pr_mirror()

    all_job_names = []
    output_object = {}
    job_id = 0
    stage_id = 0

    stage_names = []

    max_length_needs = 0
    max_needs_job = ''

    before_script, after_script = None, None
    for phase in phases:
        phase_name = phase['name']
        strip_compilers = phase['strip-compilers']

        main_phase = is_main_phase(phase_name)
        spec_labels, dependencies, stages = staged_phases[phase_name]

        for stage_jobs in stages:
            stage_name = 'stage-{0}'.format(stage_id)
            stage_names.append(stage_name)
            stage_id += 1

            for spec_label in stage_jobs:
                spec_record = spec_labels[spec_label]
                root_spec = spec_record['rootSpec']
                pkg_name = pkg_name_from_spec_label(spec_label)
                release_spec = root_spec[pkg_name]

                runner_attribs = find_matching_config(release_spec, gitlab_ci)

                if not runner_attribs:
                    tty.warn('No match found for {0}, skipping it'.format(
                        release_spec))
                    continue

                tags = [tag for tag in runner_attribs['tags']]

                variables = {}
                if 'variables' in runner_attribs:
                    variables.update(runner_attribs['variables'])

                image_name = None
                image_entry = None
                if 'image' in runner_attribs:
                    build_image = runner_attribs['image']
                    try:
                        image_name = build_image.get('name')
                        entrypoint = build_image.get('entrypoint')
                        image_entry = [p for p in entrypoint]
                    except AttributeError:
                        image_name = build_image

                job_script = [
                    'spack env activate --without-view .',
                    'spack ci rebuild',
                ]
                if 'script' in runner_attribs:
                    job_script = [s for s in runner_attribs['script']]

                before_script = None
                if 'before_script' in runner_attribs:
                    before_script = [
                        s for s in runner_attribs['before_script']
                    ]

                after_script = None
                if 'after_script' in runner_attribs:
                    after_script = [s for s in runner_attribs['after_script']]

                osname = str(release_spec.architecture)
                job_name = get_job_name(phase_name, strip_compilers,
                                        release_spec, osname, build_group)

                compiler_action = 'NONE'
                if len(phases) > 1:
                    compiler_action = 'FIND_ANY'
                    if is_main_phase(phase_name):
                        compiler_action = 'INSTALL_MISSING'

                job_vars = {
                    'SPACK_ROOT_SPEC':
                    format_root_spec(root_spec, main_phase, strip_compilers),
                    'SPACK_JOB_SPEC_PKG_NAME':
                    release_spec.name,
                    'SPACK_COMPILER_ACTION':
                    compiler_action,
                    'SPACK_IS_PR_PIPELINE':
                    str(is_pr_pipeline),
                }

                job_dependencies = []
                if spec_label in dependencies:
                    if enable_artifacts_buildcache:
                        # Get dependencies transitively, so they're all
                        # available in the artifacts buildcache.
                        dep_jobs = [
                            d for d in release_spec.traverse(deptype=all,
                                                             root=False)
                        ]
                    else:
                        # In this case, "needs" is only used for scheduling
                        # purposes, so we only get the direct dependencies.
                        dep_jobs = []
                        for dep_label in dependencies[spec_label]:
                            dep_pkg = pkg_name_from_spec_label(dep_label)
                            dep_root = spec_labels[dep_label]['rootSpec']
                            dep_jobs.append(dep_root[dep_pkg])

                    job_dependencies.extend(
                        format_job_needs(phase_name, strip_compilers, dep_jobs,
                                         osname, build_group, prune_dag,
                                         spec_labels,
                                         enable_artifacts_buildcache))

                rebuild_spec = spec_record['needs_rebuild']

                # This next section helps gitlab make sure the right
                # bootstrapped compiler exists in the artifacts buildcache by
                # creating an artificial dependency between this spec and its
                # compiler.  So, if we are in the main phase, and if the
                # compiler we are supposed to use is listed in any of the
                # bootstrap spec lists, then we will add more dependencies to
                # the job (that compiler and maybe its dependencies as well).
                if is_main_phase(phase_name):
                    spec_arch_family = (release_spec.architecture.target.
                                        microarchitecture.family)
                    compiler_pkg_spec = compilers.pkg_spec_for_compiler(
                        release_spec.compiler)
                    for bs in bootstrap_specs:
                        c_spec = bs['spec']
                        bs_arch = c_spec.architecture
                        bs_arch_family = (
                            bs_arch.target.microarchitecture.family)
                        if (c_spec.satisfies(compiler_pkg_spec)
                                and bs_arch_family == spec_arch_family):
                            # We found the bootstrap compiler this release spec
                            # should be built with, so for DAG scheduling
                            # purposes, we will at least add the compiler spec
                            # to the jobs "needs".  But if artifact buildcache
                            # is enabled, we'll have to add all transtive deps
                            # of the compiler as well.

                            # Here we check whether the bootstrapped compiler
                            # needs to be rebuilt.  Until compilers are proper
                            # dependencies, we artificially force the spec to
                            # be rebuilt if the compiler targeted to build it
                            # needs to be rebuilt.
                            bs_specs, _, _ = staged_phases[bs['phase-name']]
                            c_spec_key = spec_deps_key(c_spec)
                            rbld_comp = bs_specs[c_spec_key]['needs_rebuild']
                            rebuild_spec = rebuild_spec or rbld_comp
                            # Also update record so dependents do not fail to
                            # add this spec to their "needs"
                            spec_record['needs_rebuild'] = rebuild_spec

                            dep_jobs = [c_spec]
                            if enable_artifacts_buildcache:
                                dep_jobs = [
                                    d for d in c_spec.traverse(deptype=all)
                                ]

                            job_dependencies.extend(
                                format_job_needs(bs['phase-name'],
                                                 bs['strip-compilers'],
                                                 dep_jobs, str(bs_arch),
                                                 build_group, prune_dag,
                                                 bs_specs,
                                                 enable_artifacts_buildcache))
                        else:
                            debug_msg = ''.join([
                                'Considered compiler {0} for spec ',
                                '{1}, but rejected it either because it was ',
                                'not the compiler required by the spec, or ',
                                'because the target arch families of the ',
                                'spec and the compiler did not match'
                            ]).format(c_spec, release_spec)
                            tty.debug(debug_msg)

                if prune_dag and not rebuild_spec:
                    continue

                job_vars['SPACK_SPEC_NEEDS_REBUILD'] = str(rebuild_spec)

                if enable_cdash_reporting:
                    cdash_build_name = get_cdash_build_name(
                        release_spec, build_group)
                    all_job_names.append(cdash_build_name)

                    related_builds = []  # Used for relating CDash builds
                    if spec_label in dependencies:
                        related_builds = ([
                            spec_labels[d]['spec'].name
                            for d in dependencies[spec_label]
                        ])

                    job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
                    job_vars['SPACK_RELATED_BUILDS_CDASH'] = ';'.join(
                        sorted(related_builds))

                variables.update(job_vars)

                artifact_paths = [
                    'jobs_scratch_dir',
                    'cdash_report',
                ]

                if enable_artifacts_buildcache:
                    bc_root = 'local_mirror/build_cache'
                    artifact_paths.extend([
                        os.path.join(bc_root, p) for p in [
                            bindist.tarball_name(release_spec, '.spec.yaml'),
                            bindist.tarball_name(release_spec, '.cdashid'),
                            bindist.tarball_directory_name(release_spec),
                        ]
                    ])

                job_object = {
                    'stage': stage_name,
                    'variables': variables,
                    'script': job_script,
                    'tags': tags,
                    'artifacts': {
                        'paths': artifact_paths,
                        'when': 'always',
                    },
                    'needs': sorted(job_dependencies, key=lambda d: d['job']),
                    'retry': {
                        'max': 2,
                        'when': JOB_RETRY_CONDITIONS,
                    },
                    'interruptible': True
                }

                length_needs = len(job_dependencies)
                if length_needs > max_length_needs:
                    max_length_needs = length_needs
                    max_needs_job = job_name

                if before_script:
                    job_object['before_script'] = before_script

                if after_script:
                    job_object['after_script'] = after_script

                if image_name:
                    job_object['image'] = image_name
                    if image_entry is not None:
                        job_object['image'] = {
                            'name': image_name,
                            'entrypoint': image_entry,
                        }

                output_object[job_name] = job_object
                job_id += 1

    if print_summary:
        for phase in phases:
            phase_name = phase['name']
            tty.msg('Stages for phase "{0}"'.format(phase_name))
            phase_stages = staged_phases[phase_name]
            print_staging_summary(*phase_stages)

    tty.debug('{0} build jobs generated in {1} stages'.format(
        job_id, stage_id))

    if job_id > 0:
        tty.debug('The max_needs_job is {0}, with {1} needs'.format(
            max_needs_job, max_length_needs))

    # Use "all_job_names" to populate the build group for this set
    if enable_cdash_reporting and cdash_auth_token:
        try:
            populate_buildgroup(all_job_names, build_group, cdash_project,
                                cdash_site, cdash_auth_token, cdash_url)
        except (SpackError, HTTPError, URLError) as err:
            tty.warn('Problem populating buildgroup: {0}'.format(err))
    else:
        tty.warn('Unable to populate buildgroup without CDash credentials')

    service_job_config = None
    if 'service-job-attributes' in gitlab_ci:
        service_job_config = gitlab_ci['service-job-attributes']

    default_attrs = [
        'image',
        'tags',
        'variables',
        'before_script',
        # 'script',
        'after_script',
    ]

    if job_id > 0:
        if temp_storage_url_prefix:
            # There were some rebuild jobs scheduled, so we will need to
            # schedule a job to clean up the temporary storage location
            # associated with this pipeline.
            stage_names.append('cleanup-temp-storage')
            cleanup_job = {}

            if service_job_config:
                copy_attributes(default_attrs, service_job_config, cleanup_job)

            cleanup_job['stage'] = 'cleanup-temp-storage'
            cleanup_job['script'] = [
                'spack -d mirror destroy --mirror-url {0}/$CI_PIPELINE_ID'.
                format(temp_storage_url_prefix)
            ]
            cleanup_job['when'] = 'always'

            output_object['cleanup'] = cleanup_job

        if rebuild_index_enabled:
            # Add a final job to regenerate the index
            stage_names.append('stage-rebuild-index')
            final_job = {}

            if service_job_config:
                copy_attributes(default_attrs, service_job_config, final_job)

            index_target_mirror = mirror_urls[0]
            if is_pr_pipeline:
                index_target_mirror = pr_mirror_url

            final_job['stage'] = 'stage-rebuild-index'
            final_job['script'] = [
                'spack buildcache update-index --keys -d {0}'.format(
                    index_target_mirror)
            ]
            final_job['when'] = 'always'

            output_object['rebuild-index'] = final_job

        output_object['stages'] = stage_names

        # Capture the version of spack used to generate the pipeline, transform it
        # into a value that can be passed to "git checkout", and save it in a
        # global yaml variable
        spack_version = spack.main.get_version()
        version_to_clone = None
        v_match = re.match(r"^\d+\.\d+\.\d+$", spack_version)
        if v_match:
            version_to_clone = 'v{0}'.format(v_match.group(0))
        else:
            v_match = re.match(r"^[^-]+-[^-]+-([a-f\d]+)$", spack_version)
            if v_match:
                version_to_clone = v_match.group(1)
            else:
                version_to_clone = spack_version

        output_object['variables'] = {
            'SPACK_VERSION': spack_version,
            'SPACK_CHECKOUT_VERSION': version_to_clone,
        }

        if pr_mirror_url:
            output_object['variables']['SPACK_PR_MIRROR_URL'] = pr_mirror_url

        sorted_output = {}
        for output_key, output_value in sorted(output_object.items()):
            sorted_output[output_key] = output_value

        # TODO(opadron): remove this or refactor
        if run_optimizer:
            import spack.ci_optimization as ci_opt
            sorted_output = ci_opt.optimizer(sorted_output)

        # TODO(opadron): remove this or refactor
        if use_dependencies:
            import spack.ci_needs_workaround as cinw
            sorted_output = cinw.needs_to_dependencies(sorted_output)
    else:
        # No jobs were generated
        tty.debug('No specs to rebuild, generating no-op job')
        noop_job = {}

        if service_job_config:
            copy_attributes(default_attrs, service_job_config, noop_job)

        if 'script' not in noop_job:
            noop_job['script'] = [
                'echo "All specs already up to date, nothing to rebuild."',
            ]

        sorted_output = {'no-specs-to-rebuild': noop_job}

    with open(output_file, 'w') as outf:
        outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
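
The SPACK_CHECKOUT_VERSION logic above maps a plain release version such as 0.16.2 to its tag v0.16.2, reduces a git-describe style version to its trailing commit hash, and passes anything else through unchanged. A minimal standalone sketch of that transformation, with the helper name and the sample inputs chosen only for illustration:

import re


def checkout_version(spack_version):
    """Return a value that can be passed to "git checkout" (a sketch of the
       version handling used above)."""
    # A plain release version maps to its tag, e.g. "0.16.2" -> "v0.16.2"
    match = re.match(r"^\d+\.\d+\.\d+$", spack_version)
    if match:
        return 'v{0}'.format(match.group(0))
    # A git-describe style version maps to its trailing commit hash
    match = re.match(r"^[^-]+-[^-]+-([a-f\d]+)$", spack_version)
    if match:
        return match.group(1)
    # Anything else (e.g. a branch name) is used as-is
    return spack_version


assert checkout_version('0.16.2') == 'v0.16.2'
assert checkout_version('0.16.1-1234-abc123f') == 'abc123f'
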
Example #15
0
File: ci.py Project: mmmllf/spack
def ci_rebuild(args):
    """This command represents a gitlab-ci job, corresponding to a single
       release spec.  As such it must first decide whether or not the spec it
       has been assigned to build is up to date on the remote binary mirror.
       If it is not (i.e. the full_hash of the spec as computed locally does
       not match the one stored in the metadata on the mirror), this script
       will build the package, create a binary cache for it, and then push all
       related files to the remote binary mirror.  This script also
       communicates with a remote CDash instance to share status on the package
       build process.

       The spec to be built by this job is represented by essentially two
       pieces of information: 1) a root spec (possibly already concrete, but
       maybe still needing to be concretized) and 2) a package name used to
       index that root spec (once the root is, for certain, concrete)."""
    env = ev.get_env(args, 'ci rebuild', required=True)
    yaml_root = ev.config_dict(env.yaml)

    # The following environment variables should be defined in the CI
    # infrastructure (or some other external source) in the case that the
    # remote mirror is an S3 bucket.  The AWS keys are used to upload
    # buildcache entries to S3 using the boto3 api.
    #
    # AWS_ACCESS_KEY_ID
    # AWS_SECRET_ACCESS_KEY
    # S3_ENDPOINT_URL (only needed for non-AWS S3 implementations)
    #
    # If present, we will import the SPACK_SIGNING_KEY using the
    # "spack gpg trust" command, so it can be used both for verifying
    # dependency buildcache entries and signing the buildcache entry we create
    # for our target pkg.
    #
    # SPACK_SIGNING_KEY

    ci_artifact_dir = get_env_var('CI_PROJECT_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')

    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    remote_mirror_url = None
    if 'mirrors' in yaml_root:
        ci_mirrors = yaml_root['mirrors']
        mirror_urls = [url for url in ci_mirrors.values()]
        remote_mirror_url = mirror_urls[0]

    if not remote_mirror_url:
        tty.die('spack ci rebuild requires an env containing a mirror')

    tty.debug('ci_artifact_dir = {0}'.format(ci_artifact_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    cdash_report_dir = os.path.join(ci_artifact_dir, 'cdash_report')
    temp_dir = os.path.join(ci_artifact_dir, 'jobs_scratch_dir')
    job_log_dir = os.path.join(temp_dir, 'logs')
    spec_dir = os.path.join(temp_dir, 'specs')

    local_mirror_dir = os.path.join(ci_artifact_dir, 'local_mirror')
    build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')

    spack_is_pr_pipeline = pr_env_var == 'True'

    pipeline_mirror_url = None
    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(
            temp_storage_url_prefix, ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror or (spack_is_pr_pipeline and
            not enable_artifacts_mirror and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Clean out scratch directory from last stage
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)

    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    os.makedirs(job_log_dir)
    os.makedirs(spec_dir)

    job_spec_yaml_path = os.path.join(
        spec_dir, '{0}.yaml'.format(job_spec_pkg_name))
    job_log_file = os.path.join(job_log_dir, 'pipeline_log.txt')

    cdash_build_id = None
    cdash_build_stamp = None

    with open(job_log_file, 'w') as log_fd:
        os.dup2(log_fd.fileno(), sys.stdout.fileno())
        os.dup2(log_fd.fileno(), sys.stderr.fileno())

        current_directory = os.getcwd()
        tty.debug('Current working directory: {0}, Contents:'.format(
            current_directory))
        directory_list = os.listdir(current_directory)
        for next_entry in directory_list:
            tty.debug('  {0}'.format(next_entry))

        tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))

        if signing_key:
            spack_ci.import_signing_key(signing_key)

        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        can_verify = spack_ci.can_verify_binaries()
        verify_binaries = can_verify and spack_is_pr_pipeline is False

        spack_ci.configure_compilers(compiler_action)

        spec_map = spack_ci.get_concrete_specs(
            root_spec, job_spec_pkg_name, related_builds, compiler_action)

        job_spec = spec_map[job_spec_pkg_name]

        tty.debug('Here is the concrete spec: {0}'.format(job_spec))

        with open(job_spec_yaml_path, 'w') as fd:
            fd.write(job_spec.to_yaml(hash=ht.build_hash))

        tty.debug('Done writing concrete spec')

        # DEBUG
        with open(job_spec_yaml_path) as fd:
            tty.debug('Wrote spec file, read it back.  Contents:')
            tty.debug(fd.read())

        # DEBUG the root spec
        root_spec_yaml_path = os.path.join(spec_dir, 'root.yaml')
        with open(root_spec_yaml_path, 'w') as fd:
            fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

        # TODO: Refactor the spack install command so it's easier to use from
        # python modules.  Currently we use "exe.which('spack')" to make it
        # easier to install packages from here, but it introduces some
        # problems, e.g. if we want the spack command to have access to the
        # mirrors we're configuring, then we have to use the "spack" command
        # to add the mirrors too, which in turn means that any code here *not*
        # using the spack command does *not* have access to the mirrors.
        spack_cmd = exe.which('spack')
        mirrors_to_check = {
            'ci_remote_mirror': remote_mirror_url,
        }

        def add_mirror(mirror_name, mirror_url):
            m_args = ['mirror', 'add', mirror_name, mirror_url]
            tty.debug('Adding mirror: spack {0}'.format(m_args))
            mirror_add_output = spack_cmd(*m_args)
            # Workaround: Adding the mirrors above, using "spack_cmd" makes
            # sure they're available later when we use "spack_cmd" to install
            # the package.  But then we also need to add them to this dict
            # below, so they're available in this process (we end up having to
            # pass them to "bindist.get_mirrors_for_spec()")
            mirrors_to_check[mirror_name] = mirror_url
            tty.debug('spack mirror add output: {0}'.format(mirror_add_output))

        # Configure mirrors
        if pr_mirror_url:
            add_mirror('ci_pr_mirror', pr_mirror_url)

        if pipeline_mirror_url:
            add_mirror(spack_ci.TEMP_STORAGE_MIRROR_NAME, pipeline_mirror_url)

        tty.debug('listing spack mirrors:')
        spack_cmd('mirror', 'list')
        spack_cmd('config', 'blame', 'mirrors')

        # Checks all mirrors for a built spec with a matching full hash
        matches = bindist.get_mirrors_for_spec(
            job_spec, full_hash_match=True, mirrors_to_check=mirrors_to_check,
            index_only=False)

        if matches:
            # Got a full hash match on at least one configured mirror.  All
            # matches represent the fully up-to-date spec, so should all be
            # equivalent.  If artifacts mirror is enabled, we just pick one
            # of the matches and download the buildcache files from there to
            # the artifacts, so they're available to be used by dependent
            # jobs in subsequent stages.
            tty.debug('No need to rebuild {0}'.format(job_spec_pkg_name))
            if enable_artifacts_mirror:
                matching_mirror = matches[0]['mirror_url']
                tty.debug('Getting {0} buildcache from {1}'.format(
                    job_spec_pkg_name, matching_mirror))
                tty.debug('Downloading to {0}'.format(build_cache_dir))
                buildcache.download_buildcache_files(
                    job_spec, build_cache_dir, True, matching_mirror)
        else:
            # No full hash match anywhere means we need to rebuild spec

            # Build up common install arguments
            install_args = [
                '-d', '-v', '-k', 'install',
                '--keep-stage',
                '--require-full-hash-match',
            ]

            if not verify_binaries:
                install_args.append('--no-check-signature')

            # Add arguments to create + register a new build on CDash (if
            # enabled)
            if enable_cdash:
                tty.debug('Registering build with CDash')
                (cdash_build_id,
                    cdash_build_stamp) = spack_ci.register_cdash_build(
                    cdash_build_name, cdash_base_url, cdash_project,
                    cdash_site, job_spec_buildgroup)

                cdash_upload_url = '{0}/submit.php?project={1}'.format(
                    cdash_base_url, cdash_project_enc)

                install_args.extend([
                    '--cdash-upload-url', cdash_upload_url,
                    '--cdash-build', cdash_build_name,
                    '--cdash-site', cdash_site,
                    '--cdash-buildstamp', cdash_build_stamp,
                ])

            install_args.append(job_spec_yaml_path)

            tty.debug('Installing {0} from source'.format(job_spec.name))

            try:
                tty.debug('spack install arguments: {0}'.format(
                    install_args))
                spack_cmd(*install_args)
            finally:
                spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

            # Create buildcache on remote mirror, either on pr-specific
            # mirror or on mirror defined in spack environment
            if spack_is_pr_pipeline:
                buildcache_mirror_url = pr_mirror_url
            else:
                buildcache_mirror_url = remote_mirror_url

            try:
                spack_ci.push_mirror_contents(
                    env, job_spec, job_spec_yaml_path, buildcache_mirror_url,
                    cdash_build_id, sign_binaries)
            except Exception as inst:
                # If the mirror we're pushing to is on S3 and there's some
                # permissions problem, for example, we can't just target
                # that exception type here, since users of the
                # `spack ci rebuild' may not need or want any dependency
                # on boto3.  So we use the first non-boto exception type
                # in the hierarchy:
                #     boto3.exceptions.S3UploadFailedError
                #     boto3.exceptions.Boto3Error
                #     Exception
                #     BaseException
                #     object
                err_msg = 'Error msg: {0}'.format(inst)
                if 'Access Denied' in err_msg:
                    tty.msg('Permission problem writing to mirror')
                tty.msg(err_msg)

            # Create another copy of that buildcache in the per-pipeline
            # temporary storage mirror (this is only done if either artifacts
            # buildcache is enabled or a temporary storage url prefix is set)
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, cdash_build_id,
                                          sign_binaries)

            # Relate this build to its dependencies on CDash (if enabled)
            if enable_cdash:
                spack_ci.relate_cdash_builds(
                    spec_map, cdash_base_url, cdash_build_id, cdash_project,
                    pipeline_mirror_url or pr_mirror_url or remote_mirror_url)
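
The cdash_project_enc value computed above is simply the URL-encoded project name: urlencode produces a "project=<value>" string and the code keeps only the part after the "=". A small self-contained sketch of that step; the function name and the sample project name are illustrative, not part of the Spack API:

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2


def encode_cdash_project(cdash_project):
    # urlencode() yields e.g. "project=Spack+Testing"; strip the key prefix
    proj_enc = urlencode({'project': cdash_project})
    eq_idx = proj_enc.find('=') + 1
    return proj_enc[eq_idx:]


# e.g. a project named "Spack Testing" becomes "Spack+Testing" in the URL
assert encode_cdash_project('Spack Testing') == 'Spack+Testing'
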
Example #16
0
File: __init__.py Project: wangvsa/spack
    def __init__(self, config):
        self.config = ev.config_dict(config)
        self.container_config = self.config['container']
Example #17
0
def ci_rebuild(args):
    """This command represents a gitlab-ci job, corresponding to a single
       release spec.  As such it must first decide whether or not the spec it
       has been assigned to build is up to date on the remote binary mirror.
       If it is not (i.e. the full_hash of the spec as computed locally does
       not match the one stored in the metadata on the mirror), this script
       will build the package, create a binary cache for it, and then push all
       related files to the remote binary mirror.  This script also
       communicates with a remote CDash instance to share status on the package
       build process.

       The spec to be built by this job is represented by essentially two
       pieces of information: 1) a root spec (possibly already concrete, but
       maybe still needing to be concretized) and 2) a package name used to
       index that root spec (once the root is, for certain, concrete)."""
    env = ev.get_env(args, 'ci rebuild', required=True)
    yaml_root = ev.config_dict(env.yaml)

    # The following environment variables should be defined in the CI
    # infrastructure (or some other external source) in the case that the
    # remote mirror is an S3 bucket.  The AWS keys are used to upload
    # buildcache entries to S3 using the boto3 api.
    #
    # AWS_ACCESS_KEY_ID
    # AWS_SECRET_ACCESS_KEY
    # S3_ENDPOINT_URL (only needed for non-AWS S3 implementations)
    #
    # If present, we will import the SPACK_SIGNING_KEY using the
    # "spack gpg trust" command, so it can be used both for verifying
    # dependency buildcache entries and signing the buildcache entry we create
    # for our target pkg.
    #
    # SPACK_SIGNING_KEY

    ci_artifact_dir = get_env_var('CI_PROJECT_DIR')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')

    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    remote_mirror_url = None
    if 'mirrors' in yaml_root:
        ci_mirrors = yaml_root['mirrors']
        mirror_urls = [url for url in ci_mirrors.values()]
        remote_mirror_url = mirror_urls[0]

    if not remote_mirror_url:
        tty.die('spack ci rebuild requires an env containing a mirror')

    tty.debug('ci_artifact_dir = {0}'.format(ci_artifact_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    spack_cmd = exe.which('spack')

    cdash_report_dir = os.path.join(ci_artifact_dir, 'cdash_report')
    temp_dir = os.path.join(ci_artifact_dir, 'jobs_scratch_dir')
    job_log_dir = os.path.join(temp_dir, 'logs')
    spec_dir = os.path.join(temp_dir, 'specs')

    local_mirror_dir = os.path.join(ci_artifact_dir, 'local_mirror')
    build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')

    spack_is_pr_pipeline = pr_env_var == 'True'

    enable_artifacts_mirror = False
    artifact_mirror_url = None
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if enable_artifacts_mirror or spack_is_pr_pipeline:
            # If this is a PR pipeline, we will override the setting to
            # make sure that artifacts buildcache is enabled.  Otherwise
            # jobs will not have binary deps available since we do not
            # allow pushing binaries to remote mirror during PR pipelines
            enable_artifacts_mirror = True
            artifact_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                artifact_mirror_url)
            tty.debug(mirror_msg)

    # Clean out scratch directory from last stage
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)

    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    os.makedirs(job_log_dir)
    os.makedirs(spec_dir)

    job_spec_yaml_path = os.path.join(spec_dir,
                                      '{0}.yaml'.format(job_spec_pkg_name))
    job_log_file = os.path.join(job_log_dir, 'pipeline_log.txt')

    cdash_build_id = None
    cdash_build_stamp = None

    with open(job_log_file, 'w') as log_fd:
        os.dup2(log_fd.fileno(), sys.stdout.fileno())
        os.dup2(log_fd.fileno(), sys.stderr.fileno())

        current_directory = os.getcwd()
        tty.debug('Current working directory: {0}, Contents:'.format(
            current_directory))
        directory_list = os.listdir(current_directory)
        for next_entry in directory_list:
            tty.debug('  {0}'.format(next_entry))

        # Make a copy of the environment file, so we can overwrite the changed
        # version in between the two invocations of "spack install"
        env_src_path = env.manifest_path
        env_dirname = os.path.dirname(env_src_path)
        env_filename = os.path.basename(env_src_path)
        env_copyname = '{0}_BACKUP'.format(env_filename)
        env_dst_path = os.path.join(env_dirname, env_copyname)
        shutil.copyfile(env_src_path, env_dst_path)

        tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))

        if signing_key:
            spack_ci.import_signing_key(signing_key)

        spack_ci.configure_compilers(compiler_action)

        spec_map = spack_ci.get_concrete_specs(root_spec, job_spec_pkg_name,
                                               related_builds, compiler_action)

        job_spec = spec_map[job_spec_pkg_name]

        tty.debug('Here is the concrete spec: {0}'.format(job_spec))

        with open(job_spec_yaml_path, 'w') as fd:
            fd.write(job_spec.to_yaml(hash=ht.build_hash))

        tty.debug('Done writing concrete spec')

        # DEBUG
        with open(job_spec_yaml_path) as fd:
            tty.debug('Wrote spec file, read it back.  Contents:')
            tty.debug(fd.read())

        # DEBUG the root spec
        root_spec_yaml_path = os.path.join(spec_dir, 'root.yaml')
        with open(root_spec_yaml_path, 'w') as fd:
            fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

        if bindist.needs_rebuild(job_spec, remote_mirror_url, True):
            # Binary on remote mirror is not up to date, we need to rebuild
            # it.
            #
            # FIXME: ensure mirror precedence causes this local mirror to
            # be chosen ahead of the remote one when installing deps
            if enable_artifacts_mirror:
                mirror_add_output = spack_cmd('mirror', 'add', 'local_mirror',
                                              artifact_mirror_url)
                tty.debug('spack mirror add:')
                tty.debug(mirror_add_output)

            mirror_list_output = spack_cmd('mirror', 'list')
            tty.debug('listing spack mirrors:')
            tty.debug(mirror_list_output)

            # 2) build up install arguments
            install_args = ['-d', '-v', '-k', 'install', '--keep-stage']

            # 3) create/register a new build on CDash (if enabled)
            cdash_args = []
            if enable_cdash:
                tty.debug('Registering build with CDash')
                (cdash_build_id,
                 cdash_build_stamp) = spack_ci.register_cdash_build(
                     cdash_build_name, cdash_base_url, cdash_project,
                     cdash_site, job_spec_buildgroup)

                cdash_upload_url = '{0}/submit.php?project={1}'.format(
                    cdash_base_url, cdash_project_enc)

                cdash_args = [
                    '--cdash-upload-url',
                    cdash_upload_url,
                    '--cdash-build',
                    cdash_build_name,
                    '--cdash-site',
                    cdash_site,
                    '--cdash-buildstamp',
                    cdash_build_stamp,
                ]

            spec_cli_arg = [job_spec_yaml_path]

            tty.debug('Installing package')

            try:
                # Two-pass install is intended to avoid spack trying to
                # install from buildcache even though the locally computed
                # full hash is different than the one stored in the spec.yaml
                # file on the remote mirror.
                first_pass_args = install_args + [
                    '--cache-only',
                    '--only',
                    'dependencies',
                ]
                first_pass_args.extend(spec_cli_arg)
                tty.debug('First pass install arguments: {0}'.format(
                    first_pass_args))
                spack_cmd(*first_pass_args)

                # Overwrite the changed environment file so it doesn't break
                # the next install invocation.
                tty.debug('Copying {0} to {1}'.format(env_dst_path,
                                                      env_src_path))
                shutil.copyfile(env_dst_path, env_src_path)

                second_pass_args = install_args + [
                    '--no-cache',
                    '--only',
                    'package',
                ]
                second_pass_args.extend(cdash_args)
                second_pass_args.extend(spec_cli_arg)
                tty.debug('Second pass install arguments: {0}'.format(
                    second_pass_args))
                spack_cmd(*second_pass_args)
            except Exception as inst:
                tty.error('Caught exception during install:')
                tty.error(inst)

            spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

            # 4) create buildcache on remote mirror, but not if this is
            # running to test a spack PR
            if not spack_is_pr_pipeline:
                spack_ci.push_mirror_contents(env, job_spec,
                                              job_spec_yaml_path,
                                              remote_mirror_url,
                                              cdash_build_id)

            # 5) create another copy of that buildcache on "local artifact
            # mirror" (only done if cash reporting is enabled)
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          artifact_mirror_url, cdash_build_id)

            # 6) relate this build to its dependencies on CDash (if enabled)
            if enable_cdash:
                spack_ci.relate_cdash_builds(
                    spec_map, cdash_base_url, cdash_build_id, cdash_project,
                    artifact_mirror_url or remote_mirror_url)
        else:
            # There is nothing to do here unless "local artifact mirror" is
            # enabled, in which case, we need to download the buildcache to
            # the local artifacts directory to be used by dependent jobs in
            # subsequent stages
            tty.debug('No need to rebuild {0}'.format(job_spec_pkg_name))
            if enable_artifacts_mirror:
                tty.debug('Getting {0} buildcache'.format(job_spec_pkg_name))
                tty.debug('Downloading to {0}'.format(build_cache_dir))
                buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                     True, remote_mirror_url)
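
The two-pass install above first installs only the dependencies, and only from the buildcache, then builds the target package itself from source with CDash reporting attached. A simplified sketch of how the two argument lists are assembled; the helper name is illustrative, not part of Spack:

def two_pass_install_args(install_args, cdash_args, job_spec_yaml_path):
    # Pass 1: dependencies only, taken from the buildcache
    first_pass = install_args + ['--cache-only', '--only', 'dependencies',
                                 job_spec_yaml_path]
    # Pass 2: the package itself, built from source, with CDash reporting
    second_pass = (install_args + ['--no-cache', '--only', 'package'] +
                   cdash_args + [job_spec_yaml_path])
    return first_pass, second_pass


first, second = two_pass_install_args(
    ['-d', '-v', '-k', 'install', '--keep-stage'], [], 'pkg.yaml')
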
Example #18
0
def generate_gitlab_ci_yaml(env,
                            print_summary,
                            output_file,
                            run_optimizer=False,
                            use_dependencies=False):
    # FIXME: What's the difference between one that opens with 'spack'
    # and one that opens with 'env'?  This will only handle the former.
    with spack.concretize.disable_compiler_existence_check():
        env.concretize()

    yaml_root = ev.config_dict(env.yaml)

    if 'gitlab-ci' not in yaml_root:
        tty.die('Environment yaml does not have "gitlab-ci" section')

    gitlab_ci = yaml_root['gitlab-ci']

    final_job_config = None
    if 'final-stage-rebuild-index' in gitlab_ci:
        final_job_config = gitlab_ci['final-stage-rebuild-index']

    build_group = None
    enable_cdash_reporting = False
    cdash_auth_token = None

    if 'cdash' in yaml_root:
        enable_cdash_reporting = True
        ci_cdash = yaml_root['cdash']
        build_group = ci_cdash['build-group']
        cdash_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        cdash_site = ci_cdash['site']

        if 'SPACK_CDASH_AUTH_TOKEN' in os.environ:
            tty.verbose("Using CDash auth token from environment")
            cdash_auth_token = os.environ.get('SPACK_CDASH_AUTH_TOKEN')

    is_pr_pipeline = (os.environ.get('SPACK_IS_PR_PIPELINE',
                                     '').lower() == 'true')

    ci_mirrors = yaml_root['mirrors']
    mirror_urls = [url for url in ci_mirrors.values()]

    enable_artifacts_buildcache = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_buildcache = gitlab_ci['enable-artifacts-buildcache']

    bootstrap_specs = []
    phases = []
    if 'bootstrap' in gitlab_ci:
        for phase in gitlab_ci['bootstrap']:
            try:
                phase_name = phase.get('name')
                strip_compilers = phase.get('compiler-agnostic')
            except AttributeError:
                phase_name = phase
                strip_compilers = False
            phases.append({
                'name': phase_name,
                'strip-compilers': strip_compilers,
            })

            for bs in env.spec_lists[phase_name]:
                bootstrap_specs.append({
                    'spec': bs,
                    'phase-name': phase_name,
                    'strip-compilers': strip_compilers,
                })

    phases.append({
        'name': 'specs',
        'strip-compilers': False,
    })

    staged_phases = {}
    for phase in phases:
        phase_name = phase['name']
        with spack.concretize.disable_compiler_existence_check():
            staged_phases[phase_name] = stage_spec_jobs(
                env.spec_lists[phase_name])

    if print_summary:
        for phase in phases:
            phase_name = phase['name']
            tty.msg('Stages for phase "{0}"'.format(phase_name))
            phase_stages = staged_phases[phase_name]
            print_staging_summary(*phase_stages)

    all_job_names = []
    output_object = {}
    job_id = 0
    stage_id = 0

    stage_names = []

    max_length_needs = 0
    max_needs_job = ''

    for phase in phases:
        phase_name = phase['name']
        strip_compilers = phase['strip-compilers']

        main_phase = is_main_phase(phase_name)
        spec_labels, dependencies, stages = staged_phases[phase_name]

        for stage_jobs in stages:
            stage_name = 'stage-{0}'.format(stage_id)
            stage_names.append(stage_name)
            stage_id += 1

            for spec_label in stage_jobs:
                root_spec = spec_labels[spec_label]['rootSpec']
                pkg_name = pkg_name_from_spec_label(spec_label)
                release_spec = root_spec[pkg_name]

                runner_attribs = find_matching_config(release_spec, gitlab_ci)

                if not runner_attribs:
                    tty.warn('No match found for {0}, skipping it'.format(
                        release_spec))
                    continue

                tags = [tag for tag in runner_attribs['tags']]

                variables = {}
                if 'variables' in runner_attribs:
                    variables.update(runner_attribs['variables'])

                image_name = None
                image_entry = None
                if 'image' in runner_attribs:
                    build_image = runner_attribs['image']
                    try:
                        image_name = build_image.get('name')
                        entrypoint = build_image.get('entrypoint')
                        image_entry = [p for p in entrypoint]
                    except AttributeError:
                        image_name = build_image

                job_script = [
                    'spack env activate --without-view .',
                    'spack ci rebuild',
                ]
                if 'script' in runner_attribs:
                    job_script = [s for s in runner_attribs['script']]

                before_script = None
                if 'before_script' in runner_attribs:
                    before_script = [
                        s for s in runner_attribs['before_script']
                    ]

                after_script = None
                if 'after_script' in runner_attribs:
                    after_script = [s for s in runner_attribs['after_script']]

                osname = str(release_spec.architecture)
                job_name = get_job_name(phase_name, strip_compilers,
                                        release_spec, osname, build_group)

                compiler_action = 'NONE'
                if len(phases) > 1:
                    compiler_action = 'FIND_ANY'
                    if is_main_phase(phase_name):
                        compiler_action = 'INSTALL_MISSING'

                job_vars = {
                    'SPACK_ROOT_SPEC':
                    format_root_spec(root_spec, main_phase, strip_compilers),
                    'SPACK_JOB_SPEC_PKG_NAME':
                    release_spec.name,
                    'SPACK_COMPILER_ACTION':
                    compiler_action,
                    'SPACK_IS_PR_PIPELINE':
                    str(is_pr_pipeline),
                }

                job_dependencies = []
                if spec_label in dependencies:
                    if enable_artifacts_buildcache:
                        dep_jobs = [
                            d for d in release_spec.traverse(deptype=all,
                                                             root=False)
                        ]
                    else:
                        dep_jobs = []
                        for dep_label in dependencies[spec_label]:
                            dep_pkg = pkg_name_from_spec_label(dep_label)
                            dep_root = spec_labels[dep_label]['rootSpec']
                            dep_jobs.append(dep_root[dep_pkg])

                    job_dependencies.extend(
                        format_job_needs(phase_name, strip_compilers, dep_jobs,
                                         osname, build_group,
                                         enable_artifacts_buildcache))

                # This next section helps gitlab make sure the right
                # bootstrapped compiler exists in the artifacts buildcache by
                # creating an artificial dependency between this spec and its
                # compiler.  So, if we are in the main phase, and if the
                # compiler we are supposed to use is listed in any of the
                # bootstrap spec lists, then we will add more dependencies to
                # the job (that compiler and maybe its dependencies as well).
                if is_main_phase(phase_name):
                    compiler_pkg_spec = compilers.pkg_spec_for_compiler(
                        release_spec.compiler)
                    for bs in bootstrap_specs:
                        bs_arch = bs['spec'].architecture
                        if (bs['spec'].satisfies(compiler_pkg_spec)
                                and bs_arch == release_spec.architecture):
                            # We found the bootstrap compiler this release spec
                            # should be built with, so for DAG scheduling
                            # purposes, we will at least add the compiler spec
                            # to the job's "needs".  But if artifact buildcache
                            # is enabled, we'll have to add all transitive deps
                            # of the compiler as well.
                            dep_jobs = [bs['spec']]
                            if enable_artifacts_buildcache:
                                dep_jobs = [
                                    d for d in bs['spec'].traverse(deptype=all)
                                ]

                            job_dependencies.extend(
                                format_job_needs(bs['phase-name'],
                                                 bs['strip-compilers'],
                                                 dep_jobs, str(bs_arch),
                                                 build_group,
                                                 enable_artifacts_buildcache))

                if enable_cdash_reporting:
                    cdash_build_name = get_cdash_build_name(
                        release_spec, build_group)
                    all_job_names.append(cdash_build_name)

                    related_builds = []  # Used for relating CDash builds
                    if spec_label in dependencies:
                        related_builds = ([
                            spec_labels[d]['spec'].name
                            for d in dependencies[spec_label]
                        ])

                    job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
                    job_vars['SPACK_RELATED_BUILDS_CDASH'] = ';'.join(
                        sorted(related_builds))

                variables.update(job_vars)

                artifact_paths = [
                    'jobs_scratch_dir',
                    'cdash_report',
                ]

                if enable_artifacts_buildcache:
                    bc_root = 'local_mirror/build_cache'
                    artifact_paths.extend([
                        os.path.join(bc_root, p) for p in [
                            bindist.tarball_name(release_spec, '.spec.yaml'),
                            bindist.tarball_name(release_spec, '.cdashid'),
                            bindist.tarball_directory_name(release_spec),
                        ]
                    ])

                job_object = {
                    'stage': stage_name,
                    'variables': variables,
                    'script': job_script,
                    'tags': tags,
                    'artifacts': {
                        'paths': artifact_paths,
                        'when': 'always',
                    },
                    'needs': sorted(job_dependencies, key=lambda d: d['job']),
                    'retry': {
                        'max': 2,
                        'when': JOB_RETRY_CONDITIONS,
                    }
                }

                length_needs = len(job_dependencies)
                if length_needs > max_length_needs:
                    max_length_needs = length_needs
                    max_needs_job = job_name

                if before_script:
                    job_object['before_script'] = before_script

                if after_script:
                    job_object['after_script'] = after_script

                if image_name:
                    job_object['image'] = image_name
                    if image_entry is not None:
                        job_object['image'] = {
                            'name': image_name,
                            'entrypoint': image_entry,
                        }

                output_object[job_name] = job_object
                job_id += 1

    tty.debug('{0} build jobs generated in {1} stages'.format(
        job_id, stage_id))

    tty.debug('The max_needs_job is {0}, with {1} needs'.format(
        max_needs_job, max_length_needs))

    # Use "all_job_names" to populate the build group for this set
    if enable_cdash_reporting and cdash_auth_token:
        try:
            populate_buildgroup(all_job_names, build_group, cdash_project,
                                cdash_site, cdash_auth_token, cdash_url)
        except (SpackError, HTTPError, URLError) as err:
            tty.warn('Problem populating buildgroup: {0}'.format(err))
    else:
        tty.warn('Unable to populate buildgroup without CDash credentials')

    if final_job_config and not is_pr_pipeline:
        # Add an extra, final job to regenerate the index
        final_stage = 'stage-rebuild-index'
        final_job = {
            'stage':
            final_stage,
            'script':
            'spack buildcache update-index --keys -d {0}'.format(
                mirror_urls[0]),
            'tags':
            final_job_config['tags'],
            'when':
            'always'
        }
        if 'image' in final_job_config:
            final_job['image'] = final_job_config['image']
        if before_script:
            final_job['before_script'] = before_script
        if after_script:
            final_job['after_script'] = after_script
        output_object['rebuild-index'] = final_job
        stage_names.append(final_stage)

    output_object['stages'] = stage_names

    # Capture the version of spack used to generate the pipeline, transform it
    # into a value that can be passed to "git checkout", and save it in a
    # global yaml variable
    spack_version = spack.main.get_version()
    version_to_clone = None
    v_match = re.match(r"^\d+\.\d+\.\d+$", spack_version)
    if v_match:
        version_to_clone = 'v{0}'.format(v_match.group(0))
    else:
        v_match = re.match(r"^[^-]+-[^-]+-([a-f\d]+)$", spack_version)
        if v_match:
            version_to_clone = v_match.group(1)
        else:
            version_to_clone = spack_version

    output_object['variables'] = {
        'SPACK_VERSION': spack_version,
        'SPACK_CHECKOUT_VERSION': version_to_clone,
    }

    sorted_output = {}
    for output_key, output_value in sorted(output_object.items()):
        sorted_output[output_key] = output_value

    # TODO(opadron): remove this or refactor
    if run_optimizer:
        import spack.ci_optimization as ci_opt
        sorted_output = ci_opt.optimizer(sorted_output)

    # TODO(opadron): remove this or refactor
    if use_dependencies:
        import spack.ci_needs_workaround as cinw
        sorted_output = cinw.needs_to_dependencies(sorted_output)

    with open(output_file, 'w') as outf:
        outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
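
The SPACK_COMPILER_ACTION variable set above tells each rebuild job how to handle compilers: 'NONE' for single-phase pipelines, 'FIND_ANY' for bootstrap phases, and 'INSTALL_MISSING' for the main phase of a multi-phase pipeline. A minimal standalone sketch of that selection, with the helper name chosen only for illustration:

def choose_compiler_action(num_phases, phase_is_main):
    # Single-phase pipelines rely on compilers spack already knows about
    if num_phases <= 1:
        return 'NONE'
    # Bootstrap phases locate whatever suitable compiler is available
    if not phase_is_main:
        return 'FIND_ANY'
    # The main phase installs any bootstrapped compiler that is still missing
    return 'INSTALL_MISSING'


assert choose_compiler_action(1, True) == 'NONE'
assert choose_compiler_action(2, False) == 'FIND_ANY'
assert choose_compiler_action(2, True) == 'INSTALL_MISSING'
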